author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2019-02-08 20:12:38 +0100
committer  GitHub <noreply@github.com>  2019-02-08 20:12:38 +0100
commit     afd4d5f4a4b05f421e6f336b4d74a0d808be57ed (patch)
tree       2ccb4a0bd9bda70c1c258dcb1b8aca8961d9ad30 /vendor
parent     962850c6e0dfcee926af31fc0ad24f1f6c26f8ac (diff)
parent     25a3923b61a5ca014318e6d957f68abd03947297 (diff)
Merge pull request #2274 from baude/cobraprep
Migrate to cobra CLI
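
For orientation, here is a minimal sketch (not code from this pull request) of the command style the newly vendored spf13/cobra and spf13/pflag packages support: flags are registered on a pflag.FlagSet attached to the command, and the handler receives the *cobra.Command itself rather than a urfave/cli *cli.Context. The command name and flag below are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var quiet bool
	// Hypothetical command illustrating the cobra/pflag pattern adopted here.
	cmd := &cobra.Command{
		Use:   "example IMAGE",
		Short: "sketch of the cobra style this migration moves to",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if !quiet {
				fmt.Println("processing", args[0])
			}
			return nil
		},
	}
	cmd.Flags().BoolVarP(&quiet, "quiet", "q", false, "suppress progress output")
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
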
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go  26
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go  23
-rw-r--r--  vendor/github.com/containers/buildah/commit.go  13
-rw-r--r--  vendor/github.com/containers/buildah/config.go  47
-rw-r--r--  vendor/github.com/containers/buildah/docker/types.go  18
-rw-r--r--  vendor/github.com/containers/buildah/image.go  40
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go  17
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go  431
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go  185
-rw-r--r--  vendor/github.com/containers/buildah/pull.go  69
-rw-r--r--  vendor/github.com/containers/buildah/run.go  29
-rw-r--r--  vendor/github.com/containers/buildah/util.go  91
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go  30
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf  117
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go  39
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go  33
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image.go  2
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go  42
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go  10
-rw-r--r--  vendor/github.com/containers/image/version/version.go  2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go  107
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go  92
-rw-r--r--  vendor/github.com/inconshreveable/mousetrap/LICENSE  13
-rw-r--r--  vendor/github.com/inconshreveable/mousetrap/README.md  23
-rw-r--r--  vendor/github.com/inconshreveable/mousetrap/trap_others.go  15
-rw-r--r--  vendor/github.com/inconshreveable/mousetrap/trap_windows.go  98
-rw-r--r--  vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go  46
-rw-r--r--  vendor/github.com/spf13/cobra/LICENSE.txt  174
-rw-r--r--  vendor/github.com/spf13/cobra/README.md  736
-rw-r--r--  vendor/github.com/spf13/cobra/args.go  89
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions.go  584
-rw-r--r--  vendor/github.com/spf13/cobra/cobra.go  200
-rw-r--r--  vendor/github.com/spf13/cobra/command.go  1517
-rw-r--r--  vendor/github.com/spf13/cobra/command_notwin.go  5
-rw-r--r--  vendor/github.com/spf13/cobra/command_win.go  20
-rw-r--r--  vendor/github.com/spf13/cobra/zsh_completions.go  126
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/.editorconfig  5
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md  11
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md  8
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/.gitignore  6
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/.travis.yml  30
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/AUTHORS  52
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/CHANGELOG.md  317
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md  77
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/README.md  37
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/example_test.go  42
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/fsnotify.go  4
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/fsnotify_test.go  70
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/inotify.go  66
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go  229
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/inotify_test.go  449
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go  147
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/integration_test.go  1237
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/kqueue.go  62
54 files changed, 7297 insertions, 661 deletions
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index cbdb5c9f9..8b9baea12 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -8,6 +8,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "time"
"github.com/containers/buildah/docker"
"github.com/containers/buildah/util"
@@ -25,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.6-dev"
+ Version = "1.7-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -175,6 +176,15 @@ type Builder struct {
// after processing the AddCapabilities set, when running commands in the container.
// If a capability appears in both lists, it will be dropped.
DropCapabilities []string
+ // PrependedEmptyLayers are history entries that we'll add to a
+ // committed image, after any history items that we inherit from a base
+ // image, but before the history item for the layer that we're
+ // committing.
+ PrependedEmptyLayers []v1.History
+ // AppendedEmptyLayers are history entries that we'll add to a
+ // committed image after the history item for the layer that we're
+ // committing.
+ AppendedEmptyLayers []v1.History
CommonBuildOpts *CommonBuildOptions
// TopLayer is the top layer of the image
@@ -209,11 +219,24 @@ type BuilderInfo struct {
DefaultCapabilities []string
AddCapabilities []string
DropCapabilities []string
+ History []v1.History
}
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
// This is used in the inspect command to display Manifest and Config as string and not []byte.
func GetBuildInfo(b *Builder) BuilderInfo {
+ history := copyHistory(b.OCIv1.History)
+ history = append(history, copyHistory(b.PrependedEmptyLayers)...)
+ now := time.Now().UTC()
+ created := &now
+ history = append(history, v1.History{
+ Created: created,
+ CreatedBy: b.ImageCreatedBy,
+ Author: b.Maintainer(),
+ Comment: b.ImageHistoryComment,
+ EmptyLayer: false,
+ })
+ history = append(history, copyHistory(b.AppendedEmptyLayers)...)
return BuilderInfo{
Type: b.Type,
FromImage: b.FromImage,
@@ -239,6 +262,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
DefaultCapabilities: append([]string{}, util.DefaultCapabilities...),
AddCapabilities: append([]string{}, b.AddCapabilities...),
DropCapabilities: append([]string{}, b.DropCapabilities...),
+ History: history,
}
}
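
The change above makes the computed history visible through BuilderInfo. The following is a hedged sketch (not part of this diff) of a consumer reading that new History field; the helper function and its output format are assumptions.

package example

import (
	"fmt"

	"github.com/containers/buildah"
)

// printHistory is a hypothetical consumer of the History field added to
// BuilderInfo above: it prints one line per history entry of the builder.
func printHistory(b *buildah.Builder) {
	info := buildah.GetBuildInfo(b)
	for _, h := range info.History {
		fmt.Println(h.CreatedBy, h.Comment)
	}
}
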
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 6a1400e61..9bcac1683 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -351,21 +351,6 @@ func runUsingChrootMain() {
defer stdoutRead.Close()
defer stderrRead.Close()
}
- // A helper that returns false if err is an error that would cause us
- // to give up.
- logIfNotRetryable := func(err error, what string) (retry bool) {
- if err == nil {
- return true
- }
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- case syscall.EINTR, syscall.EAGAIN:
- return true
- }
- }
- logrus.Error(what)
- return false
- }
for readFd, writeFd := range relays {
if err := unix.SetNonblock(readFd, true); err != nil {
logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err)
@@ -388,7 +373,7 @@ func runUsingChrootMain() {
fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP})
}
_, err := unix.Poll(fds, pollTimeout)
- if !logIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
return
}
removeFds := make(map[int]struct{})
@@ -405,7 +390,7 @@ func runUsingChrootMain() {
}
b := make([]byte, 8192)
nread, err := unix.Read(int(rfd.Fd), b)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ util.LogIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
if nread > 0 {
if wfd, ok := relays[int(rfd.Fd)]; ok {
nwritten, err := buffers[wfd].Write(b[:nread])
@@ -422,7 +407,7 @@ func runUsingChrootMain() {
// from this descriptor, read as much as there is to read.
for rfd.Revents&unix.POLLHUP == unix.POLLHUP {
nr, err := unix.Read(int(rfd.Fd), b)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
if nr <= 0 {
break
}
@@ -447,7 +432,7 @@ func runUsingChrootMain() {
for wfd, buffer := range buffers {
if buffer.Len() > 0 {
nwritten, err := unix.Write(wfd, buffer.Bytes())
- logIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
+ util.LogIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
if nwritten >= 0 {
_ = buffer.Next(nwritten)
}
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 591ead4e6..da28bea61 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -67,6 +67,10 @@ type CommitOptions struct {
OnBuild []string
// Parent is the base image that this image was created by.
Parent string
+
+ // OmitTimestamp forces epoch 0 as created timestamp to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
}
// PushOptions can be used to alter how an image is copied somewhere.
@@ -97,6 +101,10 @@ type PushOptions struct {
// regenerate from on-disk layers, substituting them in the list of
// blobs to copy whenever possible.
BlobDirectory string
+ // Quiet is a boolean value that determines if minimal output to
+ // the user will be displayed, this is best used for logging.
+ // The default is false.
+ Quiet bool
}
// Commit writes the contents of the container, along with its updated
@@ -140,7 +148,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
- src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp)
+ src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp, options.OmitTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
@@ -224,6 +232,9 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ if options.Quiet {
+ options.ReportWriter = nil // Turns off logging output
+ }
blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil {
return nil, "", errors.Wrapf(err, "error checking if pushing to registry for %q is blocked", transports.ImageName(dest))
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 114b8ca37..05b0abb23 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -574,3 +574,50 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
}
}
}
+
+// AddPrependedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after any history we inherit from the base image, but
+// before the history item that we'll use to describe the new layer that we're
+// adding.
+func (b *Builder) AddPrependedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.PrependedEmptyLayers = append(b.PrependedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearPrependedEmptyLayers clears the list of history entries that we'll add
+// to the committed image before the entry for the layer that we're adding.
+func (b *Builder) ClearPrependedEmptyLayers() {
+ b.PrependedEmptyLayers = nil
+}
+
+// AddAppendedEmptyLayer adds an item to the history that we'll create when
+// committing the image, after the history item that we'll use to describe the
+// new layer that we're adding.
+func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, comment string) {
+ if created != nil {
+ copiedTimestamp := *created
+ created = &copiedTimestamp
+ }
+ b.AppendedEmptyLayers = append(b.AppendedEmptyLayers, ociv1.History{
+ Created: created,
+ CreatedBy: createdBy,
+ Author: author,
+ Comment: comment,
+ EmptyLayer: true,
+ })
+}
+
+// ClearAppendedEmptyLayers clears the list of history entries that we'll add
+// to the committed image after the entry for the layer that we're adding.
+func (b *Builder) ClearAppendedEmptyLayers() {
+ b.AppendedEmptyLayers = nil
+}
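
The helpers above are exported on Builder. Below is a hedged usage sketch (not part of this diff) of recording extra empty-layer history entries around a commit; the wrapper function, CreatedBy strings, and commit wiring are assumptions, while AddPrependedEmptyLayer, AddAppendedEmptyLayer, and the new OmitTimestamp option come from this change.

package example

import (
	"context"
	"time"

	"github.com/containers/buildah"
	"github.com/containers/image/types"
)

// commitWithHistory is a hypothetical caller of the new Builder APIs: it
// records one history entry before and one after the committed layer, and
// requests a zero creation timestamp for a reproducible image.
func commitWithHistory(ctx context.Context, b *buildah.Builder, dest types.ImageReference) (string, error) {
	now := time.Now()
	b.AddPrependedEmptyLayer(&now, `/bin/sh -c #(nop) LABEL stage="base"`, "", "inherited metadata")
	b.AddAppendedEmptyLayer(nil, `/bin/sh -c #(nop) CMD ["/bin/sh"]`, "", "")
	imgID, _, _, err := b.Commit(ctx, dest, buildah.CommitOptions{
		OmitTimestamp: true, // new option: force epoch 0 for deterministic builds
	})
	return imgID, err
}
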
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
index 6847d36fd..c59be0e60 100644
--- a/vendor/github.com/containers/buildah/docker/types.go
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -18,7 +18,7 @@ const TypeLayers = "layers"
const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
// github.com/moby/moby/image/rootfs.go
-// RootFS describes images root filesystem
+// V2S2RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type V2S2RootFS struct {
@@ -27,7 +27,7 @@ type V2S2RootFS struct {
}
// github.com/moby/moby/image/image.go
-// History stores build commands that were used to create an image
+// V2S2History stores build commands that were used to create an image
type V2S2History struct {
// Created is the timestamp at which the image was created
Created time.Time `json:"created"`
@@ -158,7 +158,7 @@ type V1Image struct {
}
// github.com/moby/moby/image/image.go
-// Image stores the image configuration
+// V2Image stores the image configuration
type V2Image struct {
V1Image
Parent ID `json:"parent,omitempty"`
@@ -176,7 +176,7 @@ type V2Image struct {
}
// github.com/docker/distribution/manifest/versioned.go
-// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// V2Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type V2Versioned struct {
@@ -188,21 +188,21 @@ type V2Versioned struct {
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// FSLayer is a container struct for BlobSums defined in an image manifest
+// V2S1FSLayer is a container struct for BlobSums defined in an image manifest
type V2S1FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// History stores unstructured v1 compatibility information
+// V2S1History stores unstructured v1 compatibility information
type V2S1History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
// github.com/docker/distribution/manifest/schema1/manifest.go
-// Manifest provides the base accessible fields for working with V2 image
+// V2S1Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type V2S1Manifest struct {
V2Versioned
@@ -225,7 +225,7 @@ type V2S1Manifest struct {
}
// github.com/docker/distribution/blobs.go
-// Descriptor describes targeted content. Used in conjunction with a blob
+// V2S2Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
@@ -250,7 +250,7 @@ type V2S2Descriptor struct {
}
// github.com/docker/distribution/manifest/schema2/manifest.go
-// Manifest defines a schema2 manifest.
+// V2S2Manifest defines a schema2 manifest.
type V2S2Manifest struct {
V2Versioned
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 47842db7d..b0876fb6d 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -58,6 +58,8 @@ type containerImageRef struct {
tarPath func(path string) (io.ReadCloser, error)
parent string
blobDirectory string
+ preEmptyLayers []v1.History
+ postEmptyLayers []v1.History
}
type containerImageSource struct {
@@ -396,6 +398,35 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
// Build history notes in the image configurations.
+ appendHistory := func(history []v1.History) {
+ for i := range history {
+ var created *time.Time
+ if history[i].Created != nil {
+ copiedTimestamp := *history[i].Created
+ created = &copiedTimestamp
+ }
+ onews := v1.History{
+ Created: created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ oimage.History = append(oimage.History, onews)
+ if created == nil {
+ created = &time.Time{}
+ }
+ dnews := docker.V2S2History{
+ Created: *created,
+ CreatedBy: history[i].CreatedBy,
+ Author: history[i].Author,
+ Comment: history[i].Comment,
+ EmptyLayer: true,
+ }
+ dimage.History = append(dimage.History, dnews)
+ }
+ }
+ appendHistory(i.preEmptyLayers)
onews := v1.History{
Created: &i.created,
CreatedBy: i.createdBy,
@@ -412,6 +443,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
EmptyLayer: false,
}
dimage.History = append(dimage.History, dnews)
+ appendHistory(i.postEmptyLayers)
dimage.Parent = docker.ID(digest.FromString(i.parent))
// Sanity check that we didn't just create a mismatch between non-empty layers in the
@@ -603,7 +635,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time, omitTimestamp bool) (types.ImageReference, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@@ -630,6 +662,10 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
created = historyTimestamp.UTC()
}
+ if omitTimestamp {
+ created = time.Unix(0, 0)
+ }
+
ref := &containerImageRef{
store: b.store,
compression: compress,
@@ -650,6 +686,8 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
tarPath: b.tarPath(),
parent: parent,
blobDirectory: blobDirectory,
+ preEmptyLayers: b.PrependedEmptyLayers,
+ postEmptyLayers: b.AppendedEmptyLayers,
}
return ref, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 217bcfc79..56ab7aa57 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -1311,13 +1311,17 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
// Check if we have a one line Dockerfile making layers irrelevant
// or the user told us to ignore layers.
- ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache)
+ singleLineDockerfile := (len(stages) < 2 && len(stages[0].Node.Children) < 1)
+ ignoreLayers := singleLineDockerfile || !b.layers && !b.noCache
if ignoreLayers {
imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
if err != nil {
return "", nil, err
}
+ if singleLineDockerfile {
+ b.log("COMMIT %s", ref)
+ }
imageID = imgID
imageRef = ref
}
@@ -1527,6 +1531,17 @@ func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
return lastErr
}
+func (b *Executor) EnsureContainerPath(path string) error {
+ _, err := os.Stat(filepath.Join(b.mountPoint, path))
+ if err != nil && os.IsNotExist(err) {
+ err = os.MkdirAll(filepath.Join(b.mountPoint, path), 0755)
+ }
+ if err != nil {
+ return errors.Wrapf(err, "error ensuring container path %q", path)
+ }
+ return nil
+}
+
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index dc1b2bc69..bbbbf3476 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -13,263 +13,183 @@ import (
"github.com/containers/buildah/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- "github.com/urfave/cli"
+ "github.com/spf13/pflag"
)
-var (
- usernsFlags = []cli.Flag{
- cli.StringFlag{
- Name: "userns",
- Usage: "'container', `path` of user namespace to join, or 'host'",
- },
- cli.StringSliceFlag{
- Name: "userns-uid-map",
- Usage: "`containerID:hostID:length` UID mapping to use in user namespace",
- },
- cli.StringSliceFlag{
- Name: "userns-gid-map",
- Usage: "`containerID:hostID:length` GID mapping to use in user namespace",
- },
- cli.StringFlag{
- Name: "userns-uid-map-user",
- Usage: "`name` of entries from /etc/subuid to use to set user namespace UID mapping",
- },
- cli.StringFlag{
- Name: "userns-gid-map-group",
- Usage: "`name` of entries from /etc/subgid to use to set user namespace GID mapping",
- },
- }
+// LayerResults represents the results of the layer flags
+type LayerResults struct {
+ ForceRm bool
+ Layers bool
+}
- NamespaceFlags = []cli.Flag{
- cli.StringFlag{
- Name: string(specs.IPCNamespace),
- Usage: "'container', `path` of IPC namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: string(specs.NetworkNamespace) + ", net",
- Usage: "'container', `path` of network namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: "cni-config-dir",
- Usage: "`directory` of CNI configuration files",
- Value: util.DefaultCNIConfigDir,
- },
- cli.StringFlag{
- Name: "cni-plugin-path",
- Usage: "`path` of CNI network plugins",
- Value: util.DefaultCNIPluginPath,
- },
- cli.StringFlag{
- Name: string(specs.PIDNamespace),
- Usage: "'container', `path` of PID namespace to join, or 'host'",
- },
- cli.StringFlag{
- Name: string(specs.UTSNamespace),
- Usage: "'container', `path` of UTS namespace to join, or 'host'",
- },
- }
+// UserNSResults represents the results for the UserNS flags
+type UserNSResults struct {
+ UserNS string
+ UserNSUIDMap []string
+ UserNSGIDMap []string
+ UserNSUIDMapUser string
+ UserNSGIDMapGroup string
+}
- LayerFlags = []cli.Flag{
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
- cli.BoolFlag{
- Name: "layers",
- Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
- },
- }
+// NameSpaceResults represents the results for Namespace flags
+type NameSpaceResults struct {
+ IPC string
+ Network string
+ CNIConfigDir string
+ CNIPlugInPath string
+ PID string
+ UTS string
+}
- BudFlags = []cli.Flag{
- cli.StringSliceFlag{
- Name: "annotation",
- Usage: "Set metadata for an image (default [])",
- },
- cli.StringFlag{
- Name: "authfile",
- Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
- },
- cli.StringSliceFlag{
- Name: "build-arg",
- Usage: "`argument=value` to supply to the builder",
- },
- cli.StringFlag{
- Name: "cache-from",
- Usage: "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Value: "",
- Usage: "use certificates at the specified path to access the registry",
- },
- cli.BoolFlag{
- Name: "compress",
- Usage: "This is legacy option, which has no effect on the image",
- },
- cli.StringFlag{
- Name: "creds",
- Value: "",
- Usage: "use `[username[:password]]` for accessing the registry",
- },
- cli.BoolFlag{
- Name: "disable-compression, D",
- Usage: "don't compress layers by default",
- },
- cli.BoolFlag{
- Name: "disable-content-trust",
- Usage: "This is a Docker specific option and is a NOOP",
- },
- cli.StringSliceFlag{
- Name: "file, f",
- Usage: "`pathname or URL` of a Dockerfile",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
- Value: DefaultFormat(),
- },
- cli.StringFlag{
- Name: "iidfile",
- Usage: "`file` to write the image ID to",
- },
- cli.StringSliceFlag{
- Name: "label",
- Usage: "Set metadata for an image (default [])",
- },
- cli.BoolFlag{
- Name: "no-cache",
- Usage: "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.",
- },
- cli.StringFlag{
- Name: "logfile",
- Usage: "log to `file` instead of stdout/stderr",
- },
- cli.IntFlag{
- Name: "loglevel",
- Usage: "adjust logging level (range from -2 to 3)",
- },
- cli.StringFlag{
- Name: "platform",
- Usage: "CLI compatibility: no action or effect",
- },
- cli.BoolTFlag{
- Name: "pull",
- Usage: "pull the image if not present",
- },
- cli.BoolFlag{
- Name: "pull-always",
- Usage: "pull the image, even if a version is present",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "refrain from announcing build instructions and image read/write progress",
- },
- cli.BoolTFlag{
- Name: "rm",
- Usage: "Remove intermediate containers after a successful build (default true)",
- },
- cli.StringFlag{
- Name: "runtime",
- Usage: "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.",
- Value: util.Runtime(),
- },
- cli.StringSliceFlag{
- Name: "runtime-flag",
- Usage: "add global flags for the container runtime",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`pathname` of signature policy file (not usually used)",
- },
- cli.BoolFlag{
- Name: "squash",
- Usage: "Squash newly built layers into a single new layer. The build process does not currently support caching so this is a NOOP.",
- },
- cli.StringSliceFlag{
- Name: "tag, t",
- Usage: "tagged `name` to apply to the built image",
- },
- cli.BoolTFlag{
- Name: "tls-verify",
- Usage: "require HTTPS and verify certificates when accessing the registry",
- },
- }
+// BudResults represents the results for Bud flags
+type BudResults struct {
+ Annotation []string
+ Authfile string
+ BuildArg []string
+ CacheFrom string
+ CertDir string
+ Compress bool
+ Creds string
+ DisableCompression bool
+ DisableContentTrust bool
+ File []string
+ Format string
+ Iidfile string
+ NoCache bool
+ Label []string
+ Logfile string
+ Loglevel int
+ Platform string
+ Pull bool
+ PullAlways bool
+ Quiet bool
+ Rm bool
+ Runtime string
+ RuntimeOpts []string
+ SignaturePolicy string
+ Squash bool
+ Tag []string
+ TlsVerify bool
+}
- FromAndBudFlags = append(append([]cli.Flag{
- cli.StringSliceFlag{
- Name: "add-host",
- Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
- },
- cli.StringFlag{
- Name: "blob-cache",
- Value: "",
- Usage: "assume image blobs in the specified directory will be available for pushing",
- Hidden: true, // this is here mainly so that we can test the API during integration tests
- },
- cli.StringSliceFlag{
- Name: "cap-add",
- Usage: "add the specified capability when running (default [])",
- },
- cli.StringSliceFlag{
- Name: "cap-drop",
- Usage: "drop the specified capability when running (default [])",
- },
- cli.StringFlag{
- Name: "cgroup-parent",
- Usage: "optional parent cgroup for the container",
- },
- cli.Uint64Flag{
- Name: "cpu-period",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) period",
- },
- cli.Int64Flag{
- Name: "cpu-quota",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) quota",
- },
- cli.Uint64Flag{
- Name: "cpu-shares, c",
- Usage: "CPU shares (relative weight)",
- },
- cli.StringFlag{
- Name: "cpuset-cpus",
- Usage: "CPUs in which to allow execution (0-3, 0,1)",
- },
- cli.StringFlag{
- Name: "cpuset-mems",
- Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
- },
- cli.StringFlag{
- Name: "isolation",
- Usage: "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.",
- Value: DefaultIsolation(),
- },
- cli.StringFlag{
- Name: "memory, m",
- Usage: "memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
- },
- cli.StringFlag{
- Name: "memory-swap",
- Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap",
- },
- cli.StringSliceFlag{
- Name: "security-opt",
- Usage: "security options (default [])",
- },
- cli.StringFlag{
- Name: "shm-size",
- Usage: "size of '/dev/shm'. The format is `<number><unit>`.",
- Value: "65536k",
- },
- cli.StringSliceFlag{
- Name: "ulimit",
- Usage: "ulimit options (default [])",
- },
- cli.StringSliceFlag{
- Name: "volume, v",
- Usage: "bind mount a volume into the container (default [])",
- },
- }, usernsFlags...), NamespaceFlags...)
-)
+// FromAndBudResults represents the results for common flags
+// in bud and from
+type FromAndBudResults struct {
+ AddHost []string
+ BlobCache string
+ CapAdd []string
+ CapDrop []string
+ CgroupParent string
+ CPUPeriod uint64
+ CPUQuota int64
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares uint64
+ Isolation string
+ Memory string
+ MemorySwap string
+ SecurityOpt []string
+ ShmSize string
+ Ulimit []string
+ Volume []string
+}
+
+// GetUserNSFlags returns the common flags for usernamespace
+func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
+ usernsFlags := pflag.FlagSet{}
+ usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
+ usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerID:hostID:length` UID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerID:hostID:length` GID mapping to use in user namespace")
+ usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
+ usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
+ return usernsFlags
+}
+
+// GetNameSpaceFlags returns the common flags for a namespace menu
+func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'container', `path` of IPC namespace to join, or 'host'")
+ fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'container', `path` of network namespace to join, or 'host'")
+ // TODO How do we alias net and network?
+ fs.StringVar(&flags.Network, "net", "", "'container', `path` of network namespace to join, or 'host'")
+ fs.MarkHidden("net")
+ fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files")
+ fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins")
+ fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "'container', `path` of PID namespace to join, or 'host'")
+ fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "'container', `path` of UTS namespace to join, or 'host'")
+ return fs
+}
+
+// GetLayerFlags returns the common flags for layers
+func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.")
+ fs.BoolVar(&flags.Layers, "layers", false, fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()))
+ return fs
+}
+
+// GetBudFlags returns common bud flags
+func GetBudFlags(flags *BudResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringSliceVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
+ fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
+ fs.StringSliceVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
+ fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
+ fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
+ fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image")
+ fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+ fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", false, "don't compress layers by default")
+ fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP")
+ fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
+ fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
+ fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
+ fs.StringSliceVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
+ fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
+ fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
+ fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
+ fs.StringVar(&flags.Platform, "platform", "", "CLI compatibility: no action or effect")
+ fs.BoolVar(&flags.Pull, "pull", true, "pull the image if not present")
+ fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image, even if a version is present")
+ fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
+ fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build (default true)")
+ fs.StringVar(&flags.Runtime, "runtime", util.Runtime(), "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.")
+ fs.StringSliceVar(&flags.RuntimeOpts, "runtime-flag", []string{}, "add global flags for the container runtime")
+ fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+ fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer. The build process does not currently support caching so this is a NOOP.")
+ fs.StringSliceVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
+ fs.BoolVar(&flags.TlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+ return fs
+}
+
+func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) pflag.FlagSet {
+ fs := pflag.FlagSet{}
+ fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
+ fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
+ fs.MarkHidden("blob-cache")
+ fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
+ fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
+ fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
+ fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
+ fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
+ fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
+ fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
+ fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
+ fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
+ fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
+ fs.StringSliceVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
+ fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.")
+ fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])")
+ fs.StringSliceVarP(&flags.Volume, "volume", "v", []string{}, "bind mount a volume into the container (default [])")
+
+ // Add in the usernamespace and namespaceflags
+ usernsFlags := GetUserNSFlags(usernsResults)
+ namespaceFlags := GetNameSpaceFlags(namespaceResults)
+ fs.AddFlagSet(&usernsFlags)
+ fs.AddFlagSet(&namespaceFlags)
+
+ return fs
+}
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
@@ -299,6 +219,15 @@ func DefaultIsolation() string {
return buildah.OCI
}
+// DefaultHistory returns the default add-history setting
+func DefaultHistory() bool {
+ history := os.Getenv("BUILDAH_HISTORY")
+ if strings.ToLower(history) == "true" || history == "1" {
+ return true
+ }
+ return false
+}
+
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
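
Because the helpers above now return pflag.FlagSet values instead of []cli.Flag slices, callers are expected to merge them into a command's flag set with AddFlagSet. A minimal, hypothetical wiring sketch (the command itself is not part of this diff):

package example

import (
	"github.com/containers/buildah/pkg/cli"
	"github.com/spf13/cobra"
)

// newBudCommand shows one plausible way to attach the flag sets built by
// GetBudFlags, GetLayerFlags, and GetFromAndBudFlags to a cobra command.
func newBudCommand() *cobra.Command {
	var (
		budResults       cli.BudResults
		layerResults     cli.LayerResults
		fromAndBud       cli.FromAndBudResults
		userNSResults    cli.UserNSResults
		namespaceResults cli.NameSpaceResults
	)
	cmd := &cobra.Command{
		Use:  "bud [flags] CONTEXT",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	budFlags := cli.GetBudFlags(&budResults)
	layerFlags := cli.GetLayerFlags(&layerResults)
	fromAndBudFlags := cli.GetFromAndBudFlags(&fromAndBud, &userNSResults, &namespaceResults)
	cmd.Flags().AddFlagSet(&budFlags)
	cmd.Flags().AddFlagSet(&layerFlags)
	cmd.Flags().AddFlagSet(&fromAndBudFlags)
	return cmd
}
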
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 41fdea8b1..ffc7c15bb 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -6,11 +6,10 @@ package parse
import (
"fmt"
+ "github.com/spf13/cobra"
"net"
"os"
"path/filepath"
- "reflect"
- "regexp"
"strconv"
"strings"
"unicode"
@@ -22,7 +21,6 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
)
@@ -35,7 +33,7 @@ const (
)
// CommonBuildOptions parses the build options from the bud cli
-func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
+func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
var (
memoryLimit int64
memorySwap int64
@@ -49,47 +47,57 @@ func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
}
- if c.String("memory") != "" {
- memoryLimit, err = units.RAMInBytes(c.String("memory"))
+ memVal, _ := c.Flags().GetString("memory")
+ if memVal != "" {
+ memoryLimit, err = units.RAMInBytes(memVal)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory")
}
}
- if c.String("memory-swap") != "" {
- memorySwap, err = units.RAMInBytes(c.String("memory-swap"))
+
+ memSwapValue, _ := c.Flags().GetString("memory-swap")
+ if memSwapValue != "" {
+ memorySwap, err = units.RAMInBytes(memSwapValue)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory-swap")
}
}
- if len(c.StringSlice("add-host")) > 0 {
- for _, host := range c.StringSlice("add-host") {
+
+ addHost, _ := c.Flags().GetStringSlice("add-host")
+ if len(addHost) > 0 {
+ for _, host := range addHost {
if err := validateExtraHost(host); err != nil {
return nil, errors.Wrapf(err, "invalid value for add-host")
}
}
}
- if _, err := units.FromHumanSize(c.String("shm-size")); err != nil {
+ if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
return nil, errors.Wrapf(err, "invalid --shm-size")
}
- if err := ParseVolumes(c.StringSlice("volume")); err != nil {
+ volumes, _ := c.Flags().GetStringSlice("volume")
+ if err := ParseVolumes(volumes); err != nil {
return nil, err
}
-
+ cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
+ cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
+ cpuShares, _ := c.Flags().GetUint64("cpu-shares")
+ ulimit, _ := c.Flags().GetStringSlice("ulimit")
commonOpts := &buildah.CommonBuildOptions{
- AddHost: c.StringSlice("add-host"),
- CgroupParent: c.String("cgroup-parent"),
- CPUPeriod: c.Uint64("cpu-period"),
- CPUQuota: c.Int64("cpu-quota"),
- CPUSetCPUs: c.String("cpuset-cpus"),
- CPUSetMems: c.String("cpuset-mems"),
- CPUShares: c.Uint64("cpu-shares"),
+ AddHost: addHost,
+ CgroupParent: c.Flag("cgroup-parent").Value.String(),
+ CPUPeriod: cpuPeriod,
+ CPUQuota: cpuQuota,
+ CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(),
+ CPUSetMems: c.Flag("cpuset-mems").Value.String(),
+ CPUShares: cpuShares,
Memory: memoryLimit,
MemorySwap: memorySwap,
- ShmSize: c.String("shm-size"),
- Ulimit: append(defaultLimits, c.StringSlice("ulimit")...),
- Volumes: c.StringSlice("volume"),
+ ShmSize: c.Flag("shm-size").Value.String(),
+ Ulimit: append(defaultLimits, ulimit...),
+ Volumes: volumes,
}
- if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil {
+ securityOpts, _ := c.Flags().GetStringSlice("security-opt")
+ if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
return nil, err
}
return commonOpts, nil
@@ -231,79 +239,45 @@ func validateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
-// ValidateFlags searches for StringFlags or StringSlice flags that never had
-// a value set. This commonly occurs when the CLI mistakenly takes the next
-// option and uses it as a value.
-func ValidateFlags(c *cli.Context, flags []cli.Flag) error {
- re, err := regexp.Compile("^-.+")
- if err != nil {
- return errors.Wrap(err, "compiling regex failed")
- }
-
- // The --cmd flag can have a following command i.e. --cmd="--help".
- // Let's skip this check just for the --cmd flag.
- for _, flag := range flags {
- switch reflect.TypeOf(flag).String() {
- case "cli.StringSliceFlag":
- {
- f := flag.(cli.StringSliceFlag)
- name := strings.Split(f.Name, ",")
- if f.Name == "cmd" {
- continue
- }
- val := c.StringSlice(name[0])
- for _, v := range val {
- if ok := re.MatchString(v); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- case "cli.StringFlag":
- {
- f := flag.(cli.StringFlag)
- name := strings.Split(f.Name, ",")
- if f.Name == "cmd" {
- continue
- }
- val := c.String(name[0])
- if ok := re.MatchString(val); ok {
- return errors.Errorf("option --%s requires a value", name[0])
- }
- }
- }
- }
- return nil
-}
-
// SystemContextFromOptions returns a SystemContext populated with values
// per the input parameters provided by the caller for the use in authentication.
-func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
+func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
+ certDir, err := c.Flags().GetString("cert-dir")
+ if err != nil {
+ certDir = ""
+ }
ctx := &types.SystemContext{
- DockerCertPath: c.String("cert-dir"),
+ DockerCertPath: certDir,
}
- if c.IsSet("tls-verify") {
- ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
- ctx.OCIInsecureSkipTLSVerify = !c.BoolT("tls-verify")
- ctx.DockerDaemonInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+ tlsVerify, err := c.Flags().GetBool("tls-verify")
+ if err == nil && c.Flag("tls-verify").Changed {
+ ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(tlsVerify)
+ ctx.OCIInsecureSkipTLSVerify = tlsVerify
+ ctx.DockerDaemonInsecureSkipTLSVerify = tlsVerify
}
- if c.IsSet("creds") {
+ creds, err := c.Flags().GetString("creds")
+ if err == nil && c.Flag("creds").Changed {
var err error
- ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds"))
+ ctx.DockerAuthConfig, err = getDockerAuth(creds)
if err != nil {
return nil, err
}
}
- if c.IsSet("signature-policy") {
- ctx.SignaturePolicyPath = c.String("signature-policy")
+ sigPolicy, err := c.Flags().GetString("signature-policy")
+ if err == nil && c.Flag("signature-policy").Changed {
+ ctx.SignaturePolicyPath = sigPolicy
}
- if c.IsSet("authfile") {
- ctx.AuthFilePath = c.String("authfile")
+ authfile, err := c.Flags().GetString("authfile")
+ if err == nil && c.Flag("authfile").Changed {
+ ctx.AuthFilePath = authfile
}
- if c.GlobalIsSet("registries-conf") {
- ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf")
+ regConf, err := c.Flags().GetString("registries-conf")
+ if err == nil && c.Flag("registries-conf").Changed {
+ ctx.SystemRegistriesConfPath = regConf
}
- if c.GlobalIsSet("registries-conf-dir") {
- ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir")
+ regConfDir, err := c.Flags().GetString("registries-conf-dir")
+ if err == nil && c.Flag("registries-conf-dir").Changed {
+ ctx.RegistriesDirPath = regConfDir
}
ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version)
return ctx, nil
@@ -345,9 +319,9 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
}
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
-func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) {
- user := c.String("userns-uid-map-user")
- group := c.String("userns-gid-map-group")
+func IDMappingOptions(c *cobra.Command) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) {
+ user := c.Flag("userns-uid-map-user").Value.String()
+ group := c.Flag("userns-gid-map-group").Value.String()
// If only the user or group was specified, use the same value for the
// other, since we need both in order to initialize the maps using the
// names.
@@ -366,6 +340,7 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
mappings = submappings
}
+ globalOptions := c.PersistentFlags()
// We'll parse the UID and GID mapping options the same way.
buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) {
outmap := make([]specs.LinuxIDMapping, 0, len(basemap))
@@ -380,11 +355,11 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
// Parse the flag's value as one or more triples (if it's even
// been set), and append them.
var spec []string
- if c.GlobalIsSet(option) {
- spec = c.GlobalStringSlice(option)
+ if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed {
+ spec, _ = globalOptions.GetStringSlice(option)
}
- if c.IsSet(option) {
- spec = c.StringSlice(option)
+ if c.Flag(option).Changed {
+ spec, _ = c.Flags().GetStringSlice(option)
}
idmap, err := parseIDMap(spec)
if err != nil {
@@ -424,8 +399,8 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
// If the user specifically requested that we either use or don't use
// user namespaces, override that default.
- if c.IsSet("userns") {
- how := c.String("userns")
+ if c.Flag("userns").Changed {
+ how := c.Flag("userns").Value.String()
switch how {
case "", "container":
usernsOption.Host = false
@@ -440,7 +415,12 @@ func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, i
}
}
usernsOptions = buildah.NamespaceOptions{usernsOption}
- if !c.IsSet("net") {
+
+ // Because --net and --network are technically two different flags, we need
+ // to check each for nil and .Changed
+ usernet := c.Flags().Lookup("net")
+ usernetwork := c.Flags().Lookup("network")
+ if (usernet != nil && usernetwork != nil) && (!usernet.Changed && !usernetwork.Changed) {
usernsOptions = append(usernsOptions, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
Host: usernsOption.Host,
@@ -486,12 +466,12 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
}
// NamespaceOptions parses the build options for all namespaces except for user namespace.
-func NamespaceOptions(c *cli.Context) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
+func NamespaceOptions(c *cobra.Command) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
options := make(buildah.NamespaceOptions, 0, 7)
policy := buildah.NetworkDefault
- for _, what := range []string{string(specs.IPCNamespace), "net", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
- if c.IsSet(what) {
- how := c.String(what)
+ for _, what := range []string{string(specs.IPCNamespace), "net", "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
+ if c.Flags().Lookup(what) != nil && c.Flag(what).Changed {
+ how := c.Flag(what).Value.String()
switch what {
case "net", "network":
what = string(specs.NetworkNamespace)
@@ -560,9 +540,10 @@ func defaultIsolation() (buildah.Isolation, error) {
}
// IsolationOption parses the --isolation flag.
-func IsolationOption(c *cli.Context) (buildah.Isolation, error) {
- if c.String("isolation") != "" {
- switch strings.ToLower(c.String("isolation")) {
+func IsolationOption(c *cobra.Command) (buildah.Isolation, error) {
+ isolation, _ := c.Flags().GetString("isolation")
+ if isolation != "" {
+ switch strings.ToLower(isolation) {
case "oci":
return buildah.IsolationOCI, nil
case "rootless":
@@ -570,7 +551,7 @@ func IsolationOption(c *cli.Context) (buildah.Isolation, error) {
case "chroot":
return buildah.IsolationChroot, nil
default:
- return 0, errors.Errorf("unrecognized isolation type %q", c.String("isolation"))
+ return 0, errors.Errorf("unrecognized isolation type %q", isolation)
}
}
return defaultIsolation()
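
With the parse helpers above now taking a *cobra.Command, a handler can pass the command straight through. A hedged sketch of such a handler follows (assuming the command was built with the flag sets from pkg/cli, so the flags these helpers read are registered); the handler itself is not part of this diff.

package example

import (
	"github.com/containers/buildah/pkg/parse"
	"github.com/spf13/cobra"
)

// budCmd is a hypothetical RunE handler: the *cobra.Command replaces the
// old *cli.Context when calling the reworked parse helpers.
func budCmd(cmd *cobra.Command, args []string) error {
	commonOpts, err := parse.CommonBuildOptions(cmd)
	if err != nil {
		return err
	}
	systemContext, err := parse.SystemContextFromOptions(cmd)
	if err != nil {
		return err
	}
	isolation, err := parse.IsolationOption(cmd)
	if err != nil {
		return err
	}
	// A real handler would hand these to the build executor; elided here.
	_, _, _ = commonOpts, systemContext, isolation
	return nil
}
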
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index e677c8925..aede1784b 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -2,12 +2,14 @@ package buildah
import (
"context"
+ "fmt"
"io"
"strings"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
+ "github.com/containers/image/docker"
"github.com/containers/image/docker/reference"
tarfile "github.com/containers/image/docker/tarfile"
ociarchive "github.com/containers/image/oci/archive"
@@ -17,6 +19,7 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
+ "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -45,6 +48,13 @@ type PullOptions struct {
// store copies of layer blobs that we pull down, if any. It should
// already exist.
BlobDirectory string
+ // AllTags is a boolean value that determines if all tagged images
+ // will be downloaded from the repository. The default is false.
+ AllTags bool
+ // Quiet is a boolean value that determines if minimal output to
+ // the user will be displayed, this is best used for logging.
+ // The default is false.
+ Quiet bool
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
@@ -141,9 +151,64 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
}
// Pull copies the contents of the image from somewhere else to local storage.
-func Pull(ctx context.Context, imageName string, options PullOptions) (types.ImageReference, error) {
+func Pull(ctx context.Context, imageName string, options PullOptions) error {
+ spec := imageName
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
- return pullImage(ctx, options.Store, imageName, options, systemContext)
+ srcRef, err := alltransports.ParseImageName(spec)
+ if err != nil {
+ if options.Transport == "" {
+ options.Transport = util.DefaultTransport
+ }
+ logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
+ transport := options.Transport
+ if transport != util.DefaultTransport {
+ transport = transport + ":"
+ }
+ spec = transport + spec
+ srcRef2, err2 := alltransports.ParseImageName(spec)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error parsing image name %q", imageName)
+ }
+ srcRef = srcRef2
+ }
+ if options.Quiet {
+ options.ReportWriter = nil // Turns off logging output
+ }
+ var names []string
+ if options.AllTags {
+ if srcRef.DockerReference() == nil {
+ return errors.New("Non-docker transport is currently not supported")
+ }
+ tags, err := docker.GetRepositoryTags(ctx, systemContext, srcRef)
+ if err != nil {
+ return errors.Wrapf(err, "error getting repository tags")
+ }
+ for _, tag := range tags {
+ name := spec + ":" + tag
+ names = append(names, name)
+ }
+ } else {
+ names = append(names, spec)
+ }
+ var errs *multierror.Error
+ for _, name := range names {
+ if options.ReportWriter != nil {
+ options.ReportWriter.Write([]byte("Pulling " + name + "\n"))
+ }
+ ref, err := pullImage(ctx, options.Store, name, options, systemContext)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ img, err := is.Transport.GetStoreImage(options.Store, ref)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ fmt.Printf("%s\n", img.ID)
+ }
+
+ return errs.ErrorOrNil()
}
func pullImage(ctx context.Context, store storage.Store, imageName string, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
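
The reworked Pull above now returns only an error and handles transport resolution, all-tags enumeration, and per-image reporting itself. A hedged usage sketch (the wrapper function and store setup are assumptions; the AllTags, Quiet, and ReportWriter options are visible in this diff):

package example

import (
	"context"
	"os"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// pullAllTags is a hypothetical caller of the new Pull signature: it asks
// for every tag of a repository and reports progress to stderr.
func pullAllTags(ctx context.Context, store storage.Store) error {
	return buildah.Pull(ctx, "docker.io/library/alpine", buildah.PullOptions{
		Store:        store,
		AllTags:      true,      // new option: fetch every tag in the repository
		Quiet:        false,     // new option: true would suppress the report writer
		ReportWriter: os.Stderr, // receives the "Pulling <name>" lines
	})
}
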
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index a0627f489..3a248f4f2 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -1130,10 +1130,14 @@ func (b *Builder) Run(command []string, options RunOptions) error {
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
+ moreCreateArgs := []string{"--no-new-keyring"}
+ if options.NoPivot {
+ moreCreateArgs = append(moreCreateArgs, "--no-pivot")
+ }
if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
return err
}
- err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
@@ -1912,21 +1916,6 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err)
}
}
- // A helper that returns false if err is an error that would cause us
- // to give up.
- logIfNotRetryable := func(err error, what string) (retry bool) {
- if err == nil {
- return true
- }
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- case syscall.EINTR, syscall.EAGAIN:
- return true
- }
- }
- logrus.Errorf("%s: %v", what, err)
- return false
- }
// Pass data back and forth.
pollTimeout := -1
for len(relayMap) > 0 {
@@ -1941,7 +1930,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
buf := make([]byte, 8192)
// Wait for new data from any input descriptor, or a notification that we're done.
_, err := unix.Poll(pollFds, pollTimeout)
- if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) {
return
}
removes := make(map[int]struct{})
@@ -1971,7 +1960,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
writeFD, needToRelay := relayMap[readFD]
if needToRelay {
n, err := unix.Read(readFD, buf)
- if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) {
return
}
// If it's zero-length on our stdin and we're
@@ -1996,7 +1985,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
// descriptor, read all that there is to read.
for pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
nr, err := unix.Read(readFD, buf)
- logIfNotRetryable(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
+ util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err))
if nr <= 0 {
break
}
@@ -2019,7 +2008,7 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
for writeFD := range relayBuffer {
if relayBuffer[writeFD].Len() > 0 {
n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes())
- if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
+ if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) {
return
}
if n > 0 {
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 5dadec7c2..08fb99706 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -1,10 +1,8 @@
package buildah
import (
- "archive/tar"
"io"
"os"
- "sync"
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/sysregistries"
@@ -15,6 +13,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
+ "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -43,6 +42,28 @@ func copyStringSlice(s []string) []string {
return t
}
+func copyHistory(history []v1.History) []v1.History {
+ if len(history) == 0 {
+ return nil
+ }
+ h := make([]v1.History, 0, len(history))
+ for _, entry := range history {
+ created := entry.Created
+ if created != nil {
+ timestamp := *created
+ created = &timestamp
+ }
+ h = append(h, v1.History{
+ Created: created,
+ CreatedBy: entry.CreatedBy,
+ Author: entry.Author,
+ Comment: entry.Comment,
+ EmptyLayer: entry.EmptyLayer,
+ })
+ }
+ return h
+}
+
func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) {
uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap))
gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap))
@@ -88,42 +109,7 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- contentReader, contentWriter, err := os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
- }
- defer contentReader.Close()
- defer contentWriter.Close()
- var hashError error
- var hashWorker sync.WaitGroup
- hashWorker.Add(1)
- go func() {
- t := tar.NewReader(contentReader)
- _, err := t.Next()
- if err != nil {
- hashError = err
- }
- if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
- hashError = err
- }
- hashWorker.Done()
- }()
- if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
- err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
- }
- hashWorker.Wait()
- if err == nil {
- err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
- }
- return err
- }
- }
- return archiver.CopyFileWithTar
+ return chrootarchive.CopyFileWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// copyWithTar returns a function which copies a directory tree from outside of
@@ -131,15 +117,7 @@ func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) f
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
- }
- }
- return archiver.CopyWithTar
+ return chrootarchive.CopyWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// untarPath returns a function which extracts an archive in a specified
@@ -147,15 +125,7 @@ func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
- if hasher != nil {
- originalUntar := archiver.Untar
- archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
- }
- }
- return archiver.UntarPath
+ return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
}
// tarPath returns a function which creates an archive of a specified
@@ -163,14 +133,7 @@ func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(sr
// container's ID maps
func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
- tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
- return func(path string) (io.ReadCloser, error) {
- return archive.TarWithOptions(path, &archive.TarOptions{
- Compression: archive.Uncompressed,
- UIDMaps: tarMappings.UIDs(),
- GIDMaps: tarMappings.GIDs(),
- })
- }
+ return archive.TarPath(convertedUIDMap, convertedGIDMap)
}
// isRegistryBlocked checks if the named registry is marked as blocked
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index f4cac522e..e46f9b7cb 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -9,6 +9,7 @@ import (
"path"
"strconv"
"strings"
+ "syscall"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -419,3 +420,32 @@ func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error
}
return policyContext, nil
}
+
+// logIfNotErrno logs the error message unless err is either nil or one of the
+// listed syscall.Errno values. It returns true if it logged an error.
+func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
+ if err == nil {
+ return false
+ }
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ for _, ignore := range ignores {
+ if errno == ignore {
+ return false
+ }
+ }
+ }
+ logrus.Error(what)
+ return true
+}
+
+// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
+// syscall.Errno. Returns "true" if we can continue.
+func LogIfNotRetryable(err error, what string) (retry bool) {
+ return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
+}
+
+// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
+// or EAGAIN or EIO syscall.Errno.
+func LogIfUnexpectedWhileDraining(err error, what string) {
+ logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
+}
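LogIfNotRetryable is what the stdio relay in run.go now calls in place of its old local closure: EINTR and EAGAIN are swallowed so the poll/read/write loop keeps going, anything else is logged and the relay stops. A small sketch of those semantics, assuming only the boolean return value matters to the caller:

    package main

    import (
    	"fmt"
    	"syscall"

    	"github.com/containers/buildah/util"
    )

    func main() {
    	// EINTR/EAGAIN are retryable: nothing is logged and the caller continues.
    	fmt.Println(util.LogIfNotRetryable(syscall.EAGAIN, "read stdin: EAGAIN")) // true
    	// nil means the operation simply succeeded.
    	fmt.Println(util.LogIfNotRetryable(nil, "unused")) // true
    	// Any other error is logged via logrus and the caller should give up.
    	fmt.Println(util.LogIfNotRetryable(syscall.EBADF, "read stdin: EBADF")) // false
    }
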
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index ee8192935..bda5f3965 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -1,68 +1,79 @@
-github.com/Azure/go-ansiterm master
-github.com/blang/semver master
-github.com/BurntSushi/toml master
-github.com/containerd/continuity master
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/blang/semver v3.5.0
+github.com/BurntSushi/toml v0.2.0
+github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57
-github.com/boltdb/bolt master
-github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519
-github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff
+github.com/containers/image v1.3
+github.com/boltdb/bolt v1.3.1
+github.com/containers/libpod v1.0
+github.com/containers/storage v1.9
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
-github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
-github.com/docker/engine-api master
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/docker-credential-helpers v0.6.1
+github.com/docker/go-connections v0.4.0
+github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
-github.com/fsouza/go-dockerclient master
-github.com/ghodss/yaml master
-github.com/gogo/protobuf master
-github.com/golang/glog master
-github.com/gorilla/context master
-github.com/gorilla/mux master
-github.com/hashicorp/errwrap master
-github.com/hashicorp/go-cleanhttp master
-github.com/hashicorp/go-multierror master
-github.com/imdario/mergo master
-github.com/mattn/go-runewidth master
-github.com/mattn/go-shellwords master
-github.com/Microsoft/go-winio master
-github.com/Microsoft/hcsshim master
-github.com/mistifyio/go-zfs master
+github.com/fsouza/go-dockerclient 29c1814d12c072344bb91aac5d2ff719db39c523
+github.com/ghodss/yaml v1.0.0
+github.com/gogo/protobuf v1.2.0
+github.com/gorilla/context v1.1.1
+github.com/gorilla/mux v1.6.2
+github.com/hashicorp/errwrap v1.0.0
+github.com/hashicorp/go-multierror v1.0.0
+github.com/imdario/mergo v0.3.6
+github.com/mattn/go-runewidth v0.0.4
+github.com/mattn/go-shellwords v1.0.3
+github.com/Microsoft/go-winio v0.4.11
+github.com/Microsoft/hcsshim v0.8.3
+github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
-github.com/mtrmac/gpgme master
-github.com/Nvveen/Gotty master
+github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
+# TODO: Gotty has not been updated since 2012. Can we find a replacement?
+github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
-github.com/opencontainers/runc master
+github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
-github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux master
-github.com/openshift/imagebuilder master
+github.com/opencontainers/runtime-tools v0.8.0
+github.com/opencontainers/selinux v1.1
+github.com/openshift/imagebuilder a4122153148e3b34161191f868565d8dffe65a69
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
-github.com/pborman/uuid master
-github.com/pkg/errors master
+github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
-github.com/seccomp/containers-golang master
-github.com/sirupsen/logrus master
-github.com/syndtr/gocapability master
-github.com/tchap/go-patricia master
-github.com/ulikunitz/xz v0.5.4
-github.com/urfave/cli 934abfb2f102315b5794e15ebc7949e4ca253920
+github.com/seccomp/libseccomp-golang v0.9.0
+github.com/seccomp/containers-golang v0.1
+github.com/sirupsen/logrus v1.0.0
+github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
+github.com/tchap/go-patricia v2.2.6
+github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
-github.com/xeipuuv/gojsonpointer master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonschema master
-golang.org/x/crypto master
-golang.org/x/net master
-golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
-golang.org/x/sys master
-golang.org/x/text master
+github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
+github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
+github.com/xeipuuv/gojsonschema v1.1.0
+golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
+golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
+golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
+golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
+golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
gopkg.in/cheggaaa/pb.v1 v1.0.27
-gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
-k8s.io/apimachinery master
-k8s.io/client-go master
-k8s.io/kubernetes master
+gopkg.in/yaml.v2 v2.2.2
+k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
+github.com/onsi/gomega v1.4.3
+github.com/spf13/cobra v0.0.3
+github.com/cpuguy83/go-md2man v1.0.8
+github.com/spf13/pflag v1.0.3
+github.com/inconshreveable/mousetrap v1.0.0
+github.com/russross/blackfriday v2.0.1
+github.com/mitchellh/go-homedir v1.0.0
+github.com/spf13/viper v1.3.1
+github.com/fsnotify/fsnotify v1.4.7
+github.com/hashicorp/hcl v1.0.0
+github.com/magiconair/properties v1.8.0
+github.com/mitchellh/mapstructure v1.1.2
+github.com/pelletier/go-toml v1.2.0
+github.com/spf13/afero v1.2.0
+github.com/spf13/cast v1.3.0
+github.com/spf13/jwalterweatherman v1.0.0
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index e783416ad..89c7e580f 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "os"
"reflect"
"runtime"
"strings"
@@ -23,7 +22,6 @@ import (
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "golang.org/x/crypto/ssh/terminal"
"golang.org/x/sync/semaphore"
pb "gopkg.in/cheggaaa/pb.v1"
)
@@ -86,7 +84,6 @@ type copier struct {
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
- progressOutput io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache
@@ -155,19 +152,11 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
}()
- // If reportWriter is not a TTY (e.g., when piping to a file), do not
- // print the progress bars to avoid long and hard to parse output.
- // createProgressBar() will print a single line instead.
- progressOutput := reportWriter
- if !isTTY(reportWriter) {
- progressOutput = ioutil.Discard
- }
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
- progressOutput: progressOutput,
progressInterval: options.ProgressInterval,
progress: options.Progress,
copyInParallel: copyInParallel,
@@ -405,30 +394,17 @@ func shortDigest(d digest.Digest) string {
return d.Encoded()[:12]
}
-// createProgressBar creates a pb.ProgressBar. Note that if the copier's
-// reportWriter is ioutil.Discard, the progress bar's output will be discarded
-// and a single line will be printed instead.
-func (c *copier) createProgressBar(srcInfo types.BlobInfo, kind string) *pb.ProgressBar {
+// createProgressBar creates a pb.ProgressBar.
+func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar {
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
bar.SetMaxWidth(80)
bar.ShowTimeLeft = false
bar.ShowPercent = false
bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
- bar.Output = c.progressOutput
- if bar.Output == ioutil.Discard {
- c.Printf("Copying %s %s\n", kind, srcInfo.Digest)
- }
+ bar.Output = writer
return bar
}
-// isTTY returns true if the io.Writer is a file and a tty.
-func isTTY(w io.Writer) bool {
- if f, ok := w.(*os.File); ok {
- return terminal.IsTerminal(int(f.Fd()))
- }
- return false
-}
-
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
@@ -480,7 +456,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
bar.Finish()
} else {
cld.destInfo = srcLayer
- logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+ logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
bar.Add64(bar.Total)
bar.Finish()
@@ -493,13 +469,12 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
progressBars := make([]*pb.ProgressBar, numLayers)
for i, srcInfo := range srcInfos {
- bar := ic.c.createProgressBar(srcInfo, "blob")
+ bar := createProgressBar(srcInfo, "blob", nil)
progressBars[i] = bar
}
progressPool := pb.NewPool(progressBars...)
- progressPool.Output = ic.c.progressOutput
-
+ progressPool.Output = ic.c.reportWriter
if err := progressPool.Start(); err != nil {
return errors.Wrapf(err, "error creating progress-bar pool")
}
@@ -593,7 +568,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
- bar := c.createProgressBar(srcInfo, "config")
+ bar := createProgressBar(srcInfo, "config", c.reportWriter)
defer bar.Finish()
bar.Start()
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 43eb22ba2..23d2ac70f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -91,6 +91,7 @@ type dockerClient struct {
password string
signatureBase signatureStorageBase
scope authScope
+ extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
@@ -281,7 +282,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
client.username = username
client.password = password
- resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
if err != nil {
return err
}
@@ -361,8 +362,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
- logrus.Debugf("trying to talk to v1 search endpoint")
- resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+ logrus.Debugf("trying to talk to v1 search endpoint\n")
+ resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
@@ -378,8 +379,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
}
}
- logrus.Debugf("trying to talk to v2 search endpoint")
- resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
+ logrus.Debugf("trying to talk to v2 search endpoint\n")
+ resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
@@ -408,20 +409,20 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
- return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
+ return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
}
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
@@ -440,7 +441,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
if auth == v2Auth {
- if err := c.setupRequestAuth(req, extraScope); err != nil {
+ if err := c.setupRequestAuth(req); err != nil {
return nil, err
}
}
@@ -459,7 +460,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
-func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
+func (c *dockerClient) setupRequestAuth(req *http.Request) error {
if len(c.challenges) == 0 {
return nil
}
@@ -473,10 +474,10 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
case "bearer":
cacheKey := ""
scopes := []authScope{c.scope}
- if extraScope != nil {
+ if c.extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
- cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
- scopes = append(scopes, *extraScope)
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
+ scopes = append(scopes, *c.extraScope)
}
var token bearerToken
t, inCache := c.tokenCache.Load(cacheKey)
@@ -563,7 +564,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err
@@ -590,7 +591,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return false
@@ -624,7 +625,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
- res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
index 530c7513e..2ab95f329 100644
--- a/vendor/github.com/containers/image/docker/docker_image.go
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -66,7 +66,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0)
for {
- res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index e9882c024..973d160d0 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -113,7 +113,7 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
- return true
+ return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
@@ -140,7 +140,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
- res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
@@ -157,7 +157,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
@@ -176,7 +176,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
- res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
@@ -194,10 +194,10 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
- res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
return false, -1, err
}
@@ -218,7 +218,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
}
// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
-func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
u := url.URL{
Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
RawQuery: url.Values{
@@ -228,7 +228,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
- res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
if err != nil {
return err
}
@@ -246,7 +246,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return errors.Wrap(err, "Error determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
- res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
@@ -276,7 +276,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// First, check whether the blob happens to already exist at the destination.
- exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
if err != nil {
return false, types.BlobInfo{}, err
}
@@ -286,6 +286,15 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// Then try reusing blobs from other locations.
+
+ // Checking candidateRepo, and mounting from it, requires an expanded token scope.
+	// We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, we use this somewhat ugly extraScope hack.
+ if d.c.extraScope != nil {
+ return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
+ }
+ defer func() {
+ d.c.extraScope = nil
+ }()
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
@@ -305,10 +314,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
-
- // Checking candidateRepo, and mounting from it, requires an
- // expanded token scope.
- extraScope := &authScope{
+ d.c.extraScope = &authScope{
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
@@ -319,7 +325,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
// so, be a nice client and don't create unnecessary upload sessions on the server.
- exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
@@ -329,7 +335,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
continue // logrus.Debug() already happened in blobExists
}
if candidateRepo.Name() != d.ref.ref.Name() {
- if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
logrus.Debugf("... Mount failed: %v", err)
continue
}
@@ -363,7 +369,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
- res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
if err != nil {
return err
}
@@ -568,7 +574,7 @@ sigExists:
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
- res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 063511535..c88ff2f34 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -89,7 +89,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
headers := make(map[string][]string)
headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
if err != nil {
return nil, "", err
}
@@ -137,7 +137,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
err error
)
for _, url := range urls {
- resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
if err == nil {
if resp.StatusCode != http.StatusOK {
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -176,7 +176,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
if err != nil {
return nil, 0, err
}
@@ -340,7 +340,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
- get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
+ get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
if err != nil {
return err
}
@@ -362,7 +362,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
- delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+ delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
index 10075992d..6644bcff3 100644
--- a/vendor/github.com/containers/image/version/version.go
+++ b/vendor/github.com/containers/image/version/version.go
@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 5
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 228d8bb82..ba1704250 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -13,6 +13,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/fileutils"
@@ -23,6 +24,7 @@ import (
"github.com/containers/storage/pkg/system"
gzip "github.com/klauspost/pgzip"
rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -1331,3 +1333,108 @@ const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)
+
+// NewArchiver returns a new Archiver
+func NewArchiver(idMappings *idtools.IDMappings) *Archiver {
+ if idMappings == nil {
+ idMappings = &idtools.IDMappings{}
+ }
+ return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
+}
+
+// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver {
+ if tarIDMappings == nil {
+ tarIDMappings = &idtools.IDMappings{}
+ }
+ if untarIDMappings == nil {
+ untarIDMappings = &idtools.IDMappings{}
+ }
+ return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
+}
+
+// CopyFileWithTarAndChown returns a function which copies a single file from outside
+// of any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ contentReader, contentWriter, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating pipe to extract data to %q", dest)
+ }
+ defer contentReader.Close()
+ defer contentWriter.Close()
+ var hashError error
+ var hashWorker sync.WaitGroup
+ hashWorker.Add(1)
+ go func() {
+ t := tar.NewReader(contentReader)
+ _, err := t.Next()
+ if err != nil {
+ hashError = err
+ }
+ if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
+ hashError = err
+ }
+ hashWorker.Done()
+ }()
+ if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
+ err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ }
+ hashWorker.Wait()
+ if err == nil {
+ err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ }
+ return err
+ }
+ }
+ return archiver.CopyFileWithTar
+}
+
+// CopyWithTarAndChown returns a function which copies a directory tree from outside of
+// any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.CopyWithTar
+}
+
+// UntarPathAndChown returns a function which extracts an archive in a specified
+// location into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.UntarPath
+}
+
+// TarPath returns a function which creates an archive of a specified
+// location in the container's filesystem, mapping permissions using the
+// container's ID maps
+func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) {
+ tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ return func(path string) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{
+ Compression: Uncompressed,
+ UIDMaps: tarMappings.UIDs(),
+ GIDMaps: tarMappings.GIDs(),
+ })
+ }
+}
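These new helpers are what buildah's copyFileWithTar, copyWithTar, untarPath and tarPath now delegate to. A rough usage sketch, assuming the vendored containers/storage module is importable and using placeholder file paths; passing a hasher makes the copy also digest the extracted content, which is how the buildah wrappers use it:

    package main

    import (
    	"crypto/sha256"
    	"fmt"

    	"github.com/containers/storage/pkg/archive"
    )

    func main() {
    	hasher := sha256.New()
    	// nil chown options and nil ID maps leave ownership untouched, the same
    	// behavior as when no user-namespace mappings are configured.
    	copyFile := archive.CopyFileWithTarAndChown(nil, hasher, nil, nil)
    	if err := copyFile("/etc/hostname", "/tmp/hostname-copy"); err != nil {
    		fmt.Println("copy failed:", err)
    		return
    	}
    	fmt.Printf("copied, content digest %x\n", hasher.Sum(nil))
    }
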
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index dde8d44d3..a36ff1cb1 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -1,34 +1,32 @@
package chrootarchive
import (
+ "archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
+ "sync"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/pkg/errors"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
- if idMappings == nil {
- idMappings = &idtools.IDMappings{}
- }
- return &archive.Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
+ archiver := archive.NewArchiver(idMappings)
+ archiver.Untar = Untar
+ return archiver
}
// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
- if tarIDMappings == nil {
- tarIDMappings = &idtools.IDMappings{}
- }
- if untarIDMappings == nil {
- untarIDMappings = &idtools.IDMappings{}
- }
- return &archive.Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
+ archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+ archiver.Untar = Untar
+ return archiver
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
@@ -81,3 +79,75 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
return invokeUnpack(r, dest, options)
}
+
+// CopyFileWithTarAndChown returns a function which copies a single file from outside
+// of any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ contentReader, contentWriter, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating pipe to extract data to %q", dest)
+ }
+ defer contentReader.Close()
+ defer contentWriter.Close()
+ var hashError error
+ var hashWorker sync.WaitGroup
+ hashWorker.Add(1)
+ go func() {
+ t := tar.NewReader(contentReader)
+ _, err := t.Next()
+ if err != nil {
+ hashError = err
+ }
+ if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
+ hashError = err
+ }
+ hashWorker.Done()
+ }()
+ if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
+ err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+ }
+ hashWorker.Wait()
+ if err == nil {
+ err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+ }
+ return err
+ }
+ }
+ return archiver.CopyFileWithTar
+}
+
+// CopyWithTarAndChown returns a function which copies a directory tree from outside of
+// any container into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.CopyWithTar
+}
+
+// UntarPathAndChown returns a function which extracts an archive in a specified
+// location into our working container, mapping permissions using the
+// container's ID maps, possibly overridden using the passed-in chownOpts
+func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
+ untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
+ archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
+ if hasher != nil {
+ originalUntar := archiver.Untar
+ archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
+ }
+ }
+ return archiver.UntarPath
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f0d1fb6a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
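mousetrap is vendored here because cobra uses it to catch the Windows double-click case for root commands. A small sketch of how a CLI can use the single exported function; the message and delay are illustrative, not taken from this repository:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/inconshreveable/mousetrap"
    )

    func main() {
    	// On non-Windows builds StartedByExplorer always returns false, so this
    	// guard only fires when the binary was launched from explorer.exe.
    	if mousetrap.StartedByExplorer() {
    		fmt.Println("This is a command line tool; run it from a terminal such as cmd.exe or PowerShell.")
    		time.Sleep(5 * time.Second)
    		return
    	}
    	fmt.Println("running normally")
    }
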
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..9d2d8a4ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..336142a5e
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 000000000..9a28e57c3
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..851fcc087
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,736 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra, including:
+
+* [Kubernetes](http://kubernetes.io/)
+* [Hugo](http://gohugo.io)
+* [rkt](https://github.com/coreos/rkt)
+* [etcd](https://github.com/coreos/etcd)
+* [Moby (formerly Docker)](https://github.com/moby/moby)
+* [Docker (distribution)](https://github.com/docker/distribution)
+* [OpenShift](https://www.openshift.com/)
+* [Delve](https://github.com/derekparker/delve)
+* [GopherJS](http://www.gopherjs.org/)
+* [CockroachDB](http://www.cockroachlabs.com/)
+* [Bleve](http://www.blevesearch.com/)
+* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
+* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [rclone](http://rclone.org/)
+* [nehm](https://github.com/bogem/nehm)
+* [Pouch](https://github.com/alibaba/pouch)
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+ * [Commands](#commands)
+ * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+ * [Using the Cobra Generator](#using-the-cobra-generator)
+ * [Using the Cobra Library](#using-the-cobra-library)
+ * [Working with Flags](#working-with-flags)
+ * [Positional and Custom Arguments](#positional-and-custom-arguments)
+ * [Example](#example)
+ * [Help Command](#help-command)
+ * [Usage Message](#usage-message)
+ * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+ * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+ * [Generating documentation for your command](#generating-documentation-for-your-command)
+ * [Generating bash completions](#generating-bash-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra is also an application that will generate your application scaffolding to rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. Users will know how
+to use the application because they will natively understand how to use it.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command, we are telling Git to clone the URL bare:
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+ go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, a Cobra-based
+application will typically follow this organizational structure:
+
+```
+ ▾ appName/
+ ▾ cmd/
+ add.go
+ your.go
+ commands.go
+ here.go
+ main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+ Use: "hugo",
+ Short: "Hugo is a very fast static site generator",
+ Long: `A Fast and Flexible Static Site Generator built with
+ love by spf13 and friends in Go.
+ Complete documentation is available at http://hugo.spf13.com`,
+ Run: func(cmd *cobra.Command, args []string) {
+ // Do Stuff Here
+ },
+}
+
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example, cmd/root.go:
+
+```go
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+func init() {
+ cobra.OnInitialize(initConfig)
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+ rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+ viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+ // Don't forget to read config either from cfgFile or from home directory!
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
+
+ if err := viper.ReadInConfig(); err != nil {
+ fmt.Println("Can't read config:", err)
+ os.Exit(1)
+ }
+}
+```
+
+### Create your main.go
+
+With the root command in place, you need to have your main function execute it.
+For clarity, Execute should be run on the root, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
+
+func main() {
+ cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside the cmd/ directory.
+
+If you wanted to create a version command, you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+ Use: "version",
+ Short: "Print the version number of Hugo",
+ Long: `All software has versions. This is Hugo's`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+ },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how a command operates.
+
+### Assign flags to a command
+
+Since the flags are defined and used in different locations, we need to
+define a variable outside those locations, with the appropriate scope, to
+assign the flag to.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assigning a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that it will be available to the
+command it's assigned to as well as to every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, in which case it will only apply to that specific command.
+
+```go
+rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+ Use: "print [OPTIONS] [COMMANDS]",
+ TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+ rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example, the persistent flag `author` is bound with `viper`.
+**Note:** the variable `author` will not be set to the value from the config
+when the `--author` flag is not provided by the user.
+
+More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
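+For example, a built-in validator is assigned directly to the `Args` field (a short sketch; the `hello` command is made up for illustration):
+
+```go
+var cmd = &cobra.Command{
+  Use:  "hello [name]",
+  Args: cobra.ExactArgs(1), // error unless exactly one positional arg is given
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, " + args[0])
+  },
+}
+```
+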
+An example of setting a custom validator:
+
+```go
+var cmd = &cobra.Command{
+ Short: "hello",
+ Args: func(cmd *cobra.Command, args []string) error {
+ if len(args) < 1 {
+ return errors.New("requires at least one arg")
+ }
+ if myapp.IsValidColor(args[0]) {
+ return nil
+ }
+ return fmt.Errorf("invalid color specified: %s", args[0])
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Hello, World!")
+ },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case, the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+ var echoTimes int
+
+ var cmdPrint = &cobra.Command{
+ Use: "print [string to print]",
+ Short: "Print anything to the screen",
+ Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdEcho = &cobra.Command{
+ Use: "echo [string to echo]",
+ Short: "Echo anything to the screen",
+ Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Print: " + strings.Join(args, " "))
+ },
+ }
+
+ var cmdTimes = &cobra.Command{
+ Use: "times [# times] [string to echo]",
+ Short: "Echo anything to the screen more times",
+ Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ for i := 0; i < echoTimes; i++ {
+ fmt.Println("Echo: " + strings.Join(args, " "))
+ }
+ },
+ }
+
+ cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+ var rootCmd = &cobra.Command{Use: "app"}
+ rootCmd.AddCommand(cmdPrint, cmdEcho)
+ cmdEcho.AddCommand(cmdTimes)
+ rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will
+support all other commands as input. Say, for instance, you have a command called
+'create'; without any additional configuration, Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+ $ cobra help
+
+ Cobra is a CLI library for Go that empowers applications.
+ This application is a tool to generate the needed files
+ to quickly create a Cobra application.
+
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
+
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+ $ cobra --invalid
+ Error: unknown flag: --invalid
+ Usage:
+ cobra [command]
+
+ Available Commands:
+ add Add a command to a Cobra Application
+ help Help about any command
+ init Initialize a Cobra Application
+
+ Flags:
+ -a, --author string author name for copyright attribution (default "YOUR NAME")
+ --config string config file (default is $HOME/.cobra.yaml)
+ -h, --help help for cobra
+ -l, --license string name of license for the project
+ --viper use Viper for configuration (default true)
+
+ Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
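+
+For example, a minimal sketch (the version string and template text below are placeholders):
+
+```go
+var rootCmd = &cobra.Command{
+  Use:     "app",
+  Version: "0.1.0", // setting Version enables the --version flag
+}
+
+func init() {
+  // optional: customize what --version prints
+  rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
+}
+```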
+
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+func main() {
+
+ var rootCmd = &cobra.Command{
+ Use: "root [sub]",
+ Short: "My root command",
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+ },
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ var subCmd = &cobra.Command{
+ Use: "sub [no options!]",
+ Short: "My subcommand",
+ PreRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+ },
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd Run with args: %v\n", args)
+ },
+ PostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+ },
+ }
+
+ rootCmd.AddCommand(subCmd)
+
+ rootCmd.SetArgs([]string{""})
+ rootCmd.Execute()
+ fmt.Println()
+ rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+ rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+ server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically based on every registered subcommand and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but make sense in your set of commands, and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+ delete
+
+Run 'kubectl help' for usage.
+```
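+
+A sketch of how that output might be wired up (`deleteCmd` is a hypothetical command; `SuggestFor` is the `Command` field described above):
+
+```go
+var deleteCmd = &cobra.Command{
+  Use:        "delete",
+  Short:      "Delete resources",
+  SuggestFor: []string{"remove", "rm"}, // typing these will suggest "delete"
+  Run: func(cmd *cobra.Command, args []string) {
+    // ...
+  },
+}
+```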
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
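+
+For example, a minimal sketch using the `doc` subpackage (assuming `rootCmd` is your root command and `./docs` is the output directory):
+
+```go
+import "github.com/spf13/cobra/doc"
+
+func genDocs() error {
+  // writes one Markdown file per command into ./docs
+  return doc.GenMarkdownTree(rootCmd, "./docs")
+}
+```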
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
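+
+For example, a minimal sketch using the generation helpers defined in bash_completions.go (the output filename is a placeholder):
+
+```go
+// write the completion script to a file...
+if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
+  fmt.Println(err)
+  os.Exit(1)
+}
+
+// ...or stream it to any io.Writer
+_ = rootCmd.GenBashCompletion(os.Stdout)
+```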
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create a new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..a5d8a9273
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,89 @@
+package cobra
+
+import (
+ "fmt"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ for _, v := range args {
+ if !stringInSlice(v, cmd.ValidArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..8fa8f486f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,584 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+ buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+ return 0;
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions=("${must_have_one_noun[@]}")
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+ name = strings.Replace(name, ":", "__", -1)
+ buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+ buf.WriteString(fmt.Sprintf(`{
+ local cur prev words cword
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local last_command
+ local nouns=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ buf.WriteString(" flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, name))
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ if localNonPersistentFlags.Lookup(flag.Name) != nil {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ for key := range flag.Annotations {
+ switch key {
+ case BashCompOneRequiredFlag:
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += "\")\n"
+ buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+ }
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" must_have_one_noun=()\n")
+ sort.Sort(sort.StringSlice(cmd.ValidArgs))
+ for _, value := range cmd.ValidArgs {
+ buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Sort(sort.StringSlice(cmd.Aliases))
+
+ buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ buf.WriteString(` fi`)
+ buf.WriteString("\n")
+}
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+ buf.WriteString(" noun_aliases=()\n")
+ sort.Sort(sort.StringSlice(cmd.ArgAliases))
+ for _, value := range cmd.ArgAliases {
+ buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c == cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+
+ if cmd.Root() == cmd {
+ buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
+ buf.WriteString("\n")
+ buf.WriteString(" command_aliases=()\n")
+ buf.WriteString("\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..7010fd15b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,200 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ template := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ }
+ for i := range d {
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
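
Editor's note: as orientation for readers of this vendored file, the following is a minimal sketch of how an application might use the package-level hooks defined above (OnInitialize, AddTemplateFunc, EnablePrefixMatching). It is not part of the diff; the initConfig function and the "mytool" command name are hypothetical.

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// initConfig is a hypothetical setup step; OnInitialize runs it when a command's Execute is called.
func initConfig() {
	fmt.Println("loading configuration")
}

func main() {
	cobra.OnInitialize(initConfig)
	// Make an extra helper available to Usage/Help templates.
	cobra.AddTemplateFunc("upper", strings.ToUpper)
	// Opt in to prefix matching of subcommand names (disabled by default).
	cobra.EnablePrefixMatching = true

	root := &cobra.Command{
		Use:   "mytool",
		Short: "example root command",
		Run:   func(cmd *cobra.Command, args []string) { fmt.Println("hello") },
	}
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}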
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..34d1bf367
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1517 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures flag parse errors to be ignored.
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in bash completions.
+ ValidArgs []string
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the bash completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction is a custom function used by the bash autocompletion generator.
+ BashCompletionFunction string
+
+ // Deprecated defines whether this command is deprecated; if set, this string is printed when the command is used.
+ Deprecated string
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print the content of the "Version" variable.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // SilenceErrors is an option to quiet errors downstream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // FParseErrWhitelist lists flag parse errors to be ignored.
+ FParseErrWhitelist FParseErrWhitelist
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Maximum lengths of the commands' strings, for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+ // commandsAreSorted defines whether the command slice is sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and child commands.
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // output is an output writer defined by user.
+ output io.Writer
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+}
+
+// SetArgs sets arguments for the command. It defaults to os.Args[1:]; if desired, it can be overridden,
+// which is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (c *Command) SetOutput(output io.Writer) {
+ c.output = output
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.output != nil {
+ return c.output
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns the usage string.
+func (c *Command) UsageString() string {
+ tmpOutput := c.output
+ bb := new(bytes.Buffer)
+ c.SetOutput(bb)
+ c.Usage()
+ c.output = tmpOutput
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// "openshift admin policy add-role-to-user admin my-user" would lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+ for i, y := range args {
+ if x == y {
+ ret := []string{}
+ ret = append(ret, args[:i]...)
+ ret = append(ret, args[i+1:]...)
+ return ret
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[1] == '-') ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ suggestionsString := ""
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ suggestionsString += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ suggestionsString += fmt.Sprintf("\t%v\n", s)
+ }
+ }
+ return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if cmd.Name() == next || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ return matches[0]
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("Called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ break
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.validateRequiredFlags(); err != nil {
+ return err
+ }
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ break
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ break
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then the corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpCmd()
+
+ var args []string
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ } else {
+ args = c.args
+ }
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If found parse to a subcommand and then failed, talk about the subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if err == flag.ErrHelp {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If the root command has SilenceErrors flagged,
+ // all subcommands should respect it.
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.Println("Error:", err.Error())
+ }
+
+ // If the root command has SilenceUsage flagged,
+ // all subcommands should respect it.
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return nil
+ }
+ return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has a help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ }
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically by executing c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ c.Flags().Bool("version", false, usage)
+ }
+}
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has a help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ c.Root().Usage()
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.Help()
+ }
+ },
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// commandSorterByName sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if it already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, falling back to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, falling back to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, falling back to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + c.Use
+ } else {
+ useline = c.Use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if a == s {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases start
+// with prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a comma-separated list of the command name and all aliases.
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags returns the flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parsing
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it makes new.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
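
Editor's note: for context, here is a minimal, self-contained sketch (an illustration, not part of the diff) of how the Command type above is typically wired together with persistent flags, a subcommand and the RunE/PersistentPreRun hooks; the "app" and "serve" names are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var verbose bool

	root := &cobra.Command{
		Use:   "app",
		Short: "example application",
		// PersistentPreRun runs before Run for this command and for every child command.
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			if verbose {
				fmt.Println("verbose mode on")
			}
		},
	}
	// A persistent flag is inherited by all subcommands.
	root.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "enable verbose output")

	serve := &cobra.Command{
		Use:   "serve [addr]",
		Short: "start the server",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			addr := ":8080"
			if len(args) == 1 {
				addr = args[0]
			}
			fmt.Println("serving on", addr)
			return nil
		},
	}
	root.AddCommand(serve)

	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}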
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..6159c1cc1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..edec728e4
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,20 @@
+// +build windows
+
+package cobra
+
+import (
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ time.Sleep(5 * time.Second)
+ os.Exit(1)
+ }
+}
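
Editor's note: a short hedged example of how an application can opt out of the Windows mousetrap behaviour implemented above by clearing MousetrapHelpText; the "app" command name is hypothetical.

package main

import "github.com/spf13/cobra"

func main() {
	// Suppress the Windows "started from explorer.exe" splash screen.
	cobra.MousetrapHelpText = ""

	root := &cobra.Command{Use: "app", Run: func(cmd *cobra.Command, args []string) {}}
	_ = root.Execute()
}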
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..889c22e27
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,126 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// GenZshCompletionFile generates a zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion file and writes to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+
+ writeHeader(buf, c)
+ maxDepth := maxDepth(c)
+ writeLevelMapping(buf, maxDepth)
+ writeLevelCases(buf, maxDepth, c)
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func writeHeader(w io.Writer, cmd *Command) {
+ fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
+}
+
+func maxDepth(c *Command) int {
+ if len(c.Commands()) == 0 {
+ return 0
+ }
+ maxDepthSub := 0
+ for _, s := range c.Commands() {
+ subDepth := maxDepth(s)
+ if subDepth > maxDepthSub {
+ maxDepthSub = subDepth
+ }
+ }
+ return 1 + maxDepthSub
+}
+
+func writeLevelMapping(w io.Writer, numLevels int) {
+ fmt.Fprintln(w, `_arguments \`)
+ for i := 1; i <= numLevels; i++ {
+ fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i)
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files")
+ fmt.Fprintln(w)
+}
+
+func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
+ fmt.Fprintln(w, "case $state in")
+ defer fmt.Fprintln(w, "esac")
+
+ for i := 1; i <= maxDepth; i++ {
+ fmt.Fprintf(w, " level%d)\n", i)
+ writeLevel(w, root, i)
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+}
+
+func writeLevel(w io.Writer, root *Command, i int) {
+ fmt.Fprintf(w, " case $words[%d] in\n", i)
+ defer fmt.Fprintln(w, " esac")
+
+ commands := filterByLevel(root, i)
+ byParent := groupByParent(commands)
+
+ for p, c := range byParent {
+ names := names(c)
+ fmt.Fprintf(w, " %s)\n", p)
+ fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
+ fmt.Fprintln(w, " ;;")
+ }
+ fmt.Fprintln(w, " *)")
+ fmt.Fprintln(w, " _arguments '*: :_files'")
+ fmt.Fprintln(w, " ;;")
+
+}
+
+func filterByLevel(c *Command, l int) []*Command {
+ cs := make([]*Command, 0)
+ if l == 0 {
+ cs = append(cs, c)
+ return cs
+ }
+ for _, s := range c.Commands() {
+ cs = append(cs, filterByLevel(s, l-1)...)
+ }
+ return cs
+}
+
+func groupByParent(commands []*Command) map[string][]*Command {
+ m := make(map[string][]*Command)
+ for _, c := range commands {
+ parent := c.Parent()
+ if parent == nil {
+ continue
+ }
+ m[parent.Name()] = append(m[parent.Name()], c)
+ }
+ return m
+}
+
+func names(commands []*Command) []string {
+ ns := make([]string, len(commands))
+ for i, c := range commands {
+ ns[i] = c.Name()
+ }
+ return ns
+}
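
Editor's note: a brief usage sketch for the generator above (an illustration, not part of the diff); the root command and the "_app" output path are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app", Run: func(cmd *cobra.Command, args []string) {}}

	// Write the zsh completion script to a file...
	if err := root.GenZshCompletionFile("_app"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// ...or to any io.Writer.
	if err := root.GenZshCompletion(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}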
diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig
new file mode 100644
index 000000000..ba49e3c23
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..4ad1aed8f
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+Before reporting an issue, please ensure you are using the latest release of fsnotify.
+
+### Which operating system (GOOS) and version are you using?
+
+Linux: lsb_release -a
+macOS: sw_vers
+Windows: systeminfo | findstr /B /C:OS
+
+### Please describe the issue that occurred.
+
+### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
diff --git a/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md b/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..64ddf7cef
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+#### What does this pull request do?
+
+
+#### Where should the reviewer start?
+
+
+#### How should this be manually tested?
+
diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml
new file mode 100644
index 000000000..981d1bb81
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_script:
+ - go get -u github.com/golang/lint/golint
+
+script:
+ - go test -v --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS
new file mode 100644
index 000000000..5ab5d41c5
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L <aaron@bettercoder.net>
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>
diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
new file mode 100644
index 000000000..be4d7ea2c
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
new file mode 100644
index 000000000..828a60b24
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md
index 3c891e349..399320741 100644
--- a/vendor/gopkg.in/fsnotify.v1/README.md
+++ b/vendor/gopkg.in/fsnotify.v1/README.md
@@ -8,14 +8,14 @@ fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather
go get -u golang.org/x/sys/...
```
-Cross platform: Windows, Linux, BSD and OS X.
+Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue |BSD, OS X, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
@@ -23,7 +23,7 @@ Cross platform: Windows, Linux, BSD and OS X.
\* Android and iOS are untested.
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
@@ -41,6 +41,35 @@ Please refer to [CONTRIBUTING][] before opening an issue or pull request.
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. We are looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching this limit results in a "no space left on device" error (see the sketch after this README diff for reading this value).
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
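The FAQ above points readers at `/proc/sys/fs/inotify/max_user_watches` for the Linux watch limit. Below is a minimal standalone Go sketch, not part of the vendored fsnotify code, that reads this value so an application can warn before it runs out of watches:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// maxUserWatches returns the per-user inotify watch limit on Linux,
// as documented in the fsnotify FAQ.
func maxUserWatches() (int, error) {
	b, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	n, err := maxUserWatches()
	if err != nil {
		fmt.Println("could not read inotify limit:", err)
		return
	}
	fmt.Println("inotify max_user_watches:", n)
}
```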
diff --git a/vendor/gopkg.in/fsnotify.v1/example_test.go b/vendor/gopkg.in/fsnotify.v1/example_test.go
new file mode 100644
index 000000000..700502cb3
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/example_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
index e7f55fee7..190bf0de5 100644
--- a/vendor/gopkg.in/fsnotify.v1/fsnotify.go
+++ b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
@@ -9,6 +9,7 @@ package fsnotify
import (
"bytes"
+ "errors"
"fmt"
)
@@ -60,3 +61,6 @@ func (op Op) String() string {
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
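The exported `ErrEventOverflow` added above is delivered on the watcher's `Errors` channel when inotify reports `IN_Q_OVERFLOW` (see the `readEvents` change further down in this diff). A small consumer-side sketch; the rescan strategy in the comment is illustrative and not part of fsnotify itself:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// watch logs events from path and reacts to queue overflow.
func watch(path string) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()
	if err := w.Add(path); err != nil {
		return err
	}
	for {
		select {
		case ev := <-w.Events:
			log.Println("event:", ev)
		case err := <-w.Errors:
			if err == fsnotify.ErrEventOverflow {
				// The kernel event queue overflowed; some events were lost,
				// so any cached view of the watched tree may be stale.
				log.Println("overflow: events dropped, rescan watched paths")
				continue
			}
			return err
		}
	}
}

func main() {
	if err := watch("/tmp"); err != nil {
		log.Fatal(err)
	}
}
```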
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go b/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
new file mode 100644
index 000000000..f9771d9df
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/fsnotify_test.go
@@ -0,0 +1,70 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify
+
+import (
+ "os"
+ "testing"
+ "time"
+)
+
+func TestEventStringWithValue(t *testing.T) {
+ for opMask, expectedString := range map[Op]string{
+ Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
+ Rename: `"/usr/someFile": RENAME`,
+ Remove: `"/usr/someFile": REMOVE`,
+ Write | Chmod: `"/usr/someFile": WRITE|CHMOD`,
+ } {
+ event := Event{Name: "/usr/someFile", Op: opMask}
+ if event.String() != expectedString {
+ t.Fatalf("Expected %s, got: %v", expectedString, event.String())
+ }
+
+ }
+}
+
+func TestEventOpStringWithValue(t *testing.T) {
+ expectedOpString := "WRITE|CHMOD"
+ event := Event{Name: "someFile", Op: Write | Chmod}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+func TestEventOpStringWithNoValue(t *testing.T) {
+ expectedOpString := ""
+ event := Event{Name: "testFile", Op: 0}
+ if event.Op.String() != expectedOpString {
+ t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+ }
+}
+
+// TestWatcherClose tests that the goroutine started by creating the watcher can be
+// signalled to return at any time, even if there is no goroutine listening on the events
+// or errors channels.
+func TestWatcherClose(t *testing.T) {
+ t.Parallel()
+
+ name := tempMkFile(t, "")
+ w := newWatcher(t)
+ err := w.Add(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = os.Remove(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Allow the watcher to receive the event.
+ time.Sleep(time.Millisecond * 100)
+
+ err = w.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go
index f3b74c51f..d9fd1b88a 100644
--- a/vendor/gopkg.in/fsnotify.v1/inotify.go
+++ b/vendor/gopkg.in/fsnotify.v1/inotify.go
@@ -24,7 +24,6 @@ type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
- cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
@@ -56,7 +55,6 @@ func NewWatcher() (*Watcher, error) {
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
- w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
@@ -103,21 +101,23 @@ func (w *Watcher) Add(name string) error {
var flags uint32 = agnosticEvents
w.mu.Lock()
- watchEntry, found := w.watches[name]
- w.mu.Unlock()
- if found {
- watchEntry.flags |= flags
- flags |= unix.IN_MASK_ADD
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
- w.mu.Lock()
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- w.mu.Unlock()
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
return nil
}
@@ -135,6 +135,13 @@ func (w *Watcher) Remove(name string) error {
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
+
+	// The watch is successfully removed if InotifyRmWatch doesn't return an
+	// error. Either way we need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
 	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
@@ -152,13 +159,6 @@ func (w *Watcher) Remove(name string) error {
return errno
}
- // wait until ignoreLinux() deleting maps
- exists := true
- for exists {
- w.cv.Wait()
- _, exists = w.watches[name]
- }
-
return nil
}
@@ -245,13 +245,31 @@ func (w *Watcher) readEvents() {
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill the
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
- name := w.paths[int(raw.Wd)]
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
w.mu.Unlock()
+
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@@ -262,7 +280,7 @@ func (w *Watcher) readEvents() {
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
- if !event.ignoreLinux(w, raw.Wd, mask) {
+ if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
@@ -279,15 +297,9 @@ func (w *Watcher) readEvents() {
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
-func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
- w.mu.Lock()
- defer w.mu.Unlock()
- name := w.paths[int(wd)]
- delete(w.paths, int(wd))
- delete(w.watches, name)
- w.cv.Broadcast()
return true
}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
new file mode 100644
index 000000000..26623efef
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type testFd [2]int
+
+func makeTestFd(t *testing.T) testFd {
+ var tfd testFd
+ errno := unix.Pipe(tfd[:])
+ if errno != nil {
+ t.Fatalf("Failed to create pipe: %v", errno)
+ }
+ return tfd
+}
+
+func (tfd testFd) fd() int {
+ return tfd[0]
+}
+
+func (tfd testFd) closeWrite(t *testing.T) {
+ errno := unix.Close(tfd[1])
+ if errno != nil {
+ t.Fatalf("Failed to close write end of pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) put(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Write(tfd[1], buf)
+ if errno != nil {
+ t.Fatalf("Failed to write to pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) get(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Read(tfd[0], buf)
+ if errno != nil {
+ t.Fatalf("Failed to read from pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) close() {
+ unix.Close(tfd[1])
+ unix.Close(tfd[0])
+}
+
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+ tfd := makeTestFd(t)
+ poller, err := newFdPoller(tfd.fd())
+ if err != nil {
+ t.Fatalf("Failed to create poller: %v", err)
+ }
+ return tfd, poller
+}
+
+func TestPollerWithBadFd(t *testing.T) {
+ _, err := newFdPoller(-1)
+ if err != unix.EBADF {
+ t.Fatalf("Expected EBADF, got: %v", err)
+ }
+}
+
+func TestPollerWithData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+ tfd.get(t)
+}
+
+func TestPollerWithWakeup(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerWithClose(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.closeWrite(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+}
+
+func TestPollerWithWakeupAndData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+
+ // both data and wakeup
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ // data is still in the buffer, wakeup is cleared
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ tfd.get(t)
+ // data is gone, only wakeup now
+ err = poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerConcurrent(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ oks := make(chan bool)
+ live := make(chan bool)
+ defer close(live)
+ go func() {
+ defer close(oks)
+ for {
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ oks <- ok
+ if !<-live {
+ return
+ }
+ }
+ }()
+
+ // Try a write
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.put(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+ live <- true
+
+ // Try a wakeup
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ if <-oks {
+ t.Fatalf("expected false")
+ }
+ live <- true
+
+ // Try a close
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.closeWrite(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
new file mode 100644
index 000000000..54f3f00eb
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
@@ -0,0 +1,449 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestInotifyCloseRightAway(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Close immediately; it won't even reach the first unix.Read.
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+ w.Add(testDir)
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseAfterRead(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add .")
+ }
+
+ // Generate an event.
+ os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+ // Wait for readEvents to read the event, then close the watcher.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+ select {
+ case err, ok := <-w.Errors:
+ if ok {
+ t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+ }
+ default:
+ t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+ }
+
+ select {
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+ }
+ default:
+ t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+ }
+}
+
+func TestInotifyCloseCreate(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+ h, err := os.Create(filepath.Join(testDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create file in testdir: %v", err)
+ }
+ h.Close()
+ select {
+ case _ = <-w.Events:
+ case err := <-w.Errors:
+ t.Fatalf("Error from watcher: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("Took too long to wait for event")
+ }
+
+ // At this point, we've received one event, so the goroutine is ready.
+ // It's also blocking on unix.Read.
+ // Now we try to swap the file descriptor under its nose.
+ w.Close()
+ w, err = NewWatcher()
+ defer w.Close()
+ if err != nil {
+ t.Fatalf("Failed to create second watcher: %v", err)
+ }
+
+ <-time.After(50 * time.Millisecond)
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Error adding testDir again: %v", err)
+ }
+}
+
+// This test verifies the watcher can keep up with file creations/deletions
+// when under load.
+func TestInotifyStress(t *testing.T) {
+ maxNumToCreate := 1000
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFilePrefix := filepath.Join(testDir, "testfile")
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+
+ doneChan := make(chan struct{})
+ // The buffer ensures that the file generation goroutine is never blocked.
+ errChan := make(chan error, 2*maxNumToCreate)
+
+ go func() {
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+
+ // If we delete a newly created file too quickly, inotify will skip the
+ // create event and only send the delete event.
+ time.Sleep(100 * time.Millisecond)
+
+ for i := 0; i < maxNumToCreate; i++ {
+ testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+ err = os.Remove(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Remove failed: %v", err)
+ }
+ }
+
+ close(doneChan)
+ }()
+
+ creates := 0
+ removes := 0
+
+ finished := false
+ after := time.After(10 * time.Second)
+ for !finished {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case <-doneChan:
+ finished = true
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ }
+ }
+
+ // Drain remaining events from channels
+ count := 0
+ for count < 10 {
+ select {
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testFilePrefix) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ count = 0
+ default:
+ count++
+ // Give the watcher chances to fill the channels.
+ time.Sleep(time.Millisecond)
+ }
+ }
+
+ if creates-removes > 1 || creates-removes < -1 {
+ t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+ }
+ if creates < 50 {
+ t.Fatalf("Expected at least 50 creates, got %d", creates)
+ }
+}
+
+func TestInotifyRemoveTwice(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err != nil {
+ t.Fatalf("wanted successful remove but got: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err == nil {
+ t.Fatalf("no error on removing invalid file")
+ }
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyInnerMapLength(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+ go func() {
+ for err := range w.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+ _ = <-w.Events // consume Remove event
+ <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
+
+func TestInotifyOverflow(t *testing.T) {
+ // We need to generate many more events than the
+ // fs.inotify.max_queued_events sysctl setting.
+ // We use multiple goroutines (one per directory)
+ // to speed up file creation.
+ numDirs := 128
+ numFiles := 1024
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ err := os.Mkdir(testSubdir, 0777)
+ if err != nil {
+ t.Fatalf("Cannot create subdir: %v", err)
+ }
+
+ err = w.Add(testSubdir)
+ if err != nil {
+ t.Fatalf("Failed to add subdir: %v", err)
+ }
+ }
+
+ errChan := make(chan error, numDirs*numFiles)
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ go func() {
+ for fn := 0; fn < numFiles; fn++ {
+ testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+ }()
+ }
+
+ creates := 0
+ overflows := 0
+
+ after := time.After(10 * time.Second)
+ for overflows == 0 && creates < numDirs*numFiles {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ if err == ErrEventOverflow {
+ overflows++
+ } else {
+ t.Fatalf("Got an error from watcher: %v", err)
+ }
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testDir) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ }
+ }
+
+ if creates == numDirs*numFiles {
+ t.Fatalf("Could not trigger overflow")
+ }
+
+ if overflows == 0 {
+ t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
+ numDirs*numFiles, creates)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
new file mode 100644
index 000000000..cd6adc273
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
@@ -0,0 +1,147 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
+//
+// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
+//
+// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
+// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
+func testExchangedataForWatcher(t *testing.T, watchDir bool) {
+ // Create directory to watch
+ testDir1 := tempMkdir(t)
+
+ // For the intermediate file
+ testDir2 := tempMkdir(t)
+
+ defer os.RemoveAll(testDir1)
+ defer os.RemoveAll(testDir2)
+
+ resolvedFilename := "TestFsnotifyEvents.file"
+
+ // TextMate does:
+ //
+ // 1. exchangedata (intermediate, resolved)
+ // 2. unlink intermediate
+ //
+ // Let's try to simulate that:
+ resolved := filepath.Join(testDir1, resolvedFilename)
+ intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+ // Make sure we create the file before we start watching
+ createAndSyncFile(t, resolved)
+
+ watcher := newWatcher(t)
+
+ // Test both variants in isolation
+ if watchDir {
+ addWatch(t, watcher, testDir1)
+ } else {
+ addWatch(t, watcher, resolved)
+ }
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var removeReceived counter
+ var createReceived counter
+
+ done := make(chan bool)
+
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(resolved) {
+ if event.Op&Remove == Remove {
+ removeReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ }
+ t.Logf("event received: %s", event)
+ }
+ done <- true
+ }()
+
+ // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+ for i := 1; i <= 3; i++ {
+ // The intermediate file is created in a folder outside the watcher
+ createAndSyncFile(t, intermediate)
+
+ // 1. Swap
+ if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+ t.Fatalf("[%d] exchangedata failed: %s", i, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ // 2. Delete the intermediate file
+ err := os.Remove(intermediate)
+
+ if err != nil {
+ t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ // The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
+ if removeReceived.value() < 3 {
+ t.Fatal("fsnotify remove events have not been received after 500 ms")
+ }
+
+ if createReceived.value() < 3 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
+func TestExchangedataInWatchedDir(t *testing.T) {
+ testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+ testExchangedataForWatcher(t, false)
+}
+
+func createAndSyncFile(t *testing.T, filepath string) {
+ f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating %s failed: %s", filepath, err)
+ }
+ f1.Sync()
+ f1.Close()
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_test.go b/vendor/gopkg.in/fsnotify.v1/integration_test.go
new file mode 100644
index 000000000..8b7e9d3ec
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_test.go
@@ -0,0 +1,1237 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter
+type counter struct {
+ val int32
+}
+
+func (c *counter) increment() {
+ atomic.AddInt32(&c.val, 1)
+}
+
+func (c *counter) value() int32 {
+ return atomic.LoadInt32(&c.val)
+}
+
+func (c *counter) reset() {
+ atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory
+func tempMkdir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test directory: %s", err)
+ }
+ return dir
+}
+
+// tempMkFile makes a temporary file.
+func tempMkFile(t *testing.T, dir string) string {
+ f, err := ioutil.TempFile(dir, "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test file: %v", err)
+ }
+ defer f.Close()
+ return f.Name()
+}
+
+// newWatcher initializes an fsnotify Watcher instance.
+func newWatcher(t *testing.T) *Watcher {
+ watcher, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("NewWatcher() failed: %s", err)
+ }
+ return watcher
+}
+
+// addWatch adds a watch for a directory
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+ if err := watcher.Add(dir); err != nil {
+ t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+ }
+}
+
+func TestFsnotifyMultipleOperations(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory that's not watched
+ testDirToMoveFiles := tempMkdir(t)
+ defer os.RemoveAll(testDirToMoveFiles)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+ testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived, renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // Modify the file outside of the watched dir
+ f, err = os.Open(testFileRenamed)
+ if err != nil {
+ t.Fatalf("open test renamed file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file that was moved
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ rReceived := renameReceived.value()
+ if dReceived+rReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyMultipleCreates(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived < 3 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDirOnly(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ // This should NOT add any events to the fsnotify event queue
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+ os.Remove(testFileAlreadyExists)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 1 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFileAlreadyExists)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var deleteReceived counter
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ }()
+
+ os.RemoveAll(testDir)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ dReceived := deleteReceived.value()
+ if dReceived < 2 {
+ t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+ }
+}
+
+func TestFsnotifySubDir(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+ testSubDir := filepath.Join(testDir, "sub")
+ testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+ t.Logf("event received: %s", event)
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ // Create sub-directory
+ if err := os.Mkdir(testSubDir, 0777); err != nil {
+ t.Fatalf("failed to create test sub-directory: %s", err)
+ }
+
+ // Create a file
+ var f *os.File
+ f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ // Create a file in the sub-directory (we should NOT see this event, since we are not watching the sub-directory)
+ var fs *os.File
+ fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fs.Sync()
+ fs.Close()
+
+ time.Sleep(200 * time.Millisecond)
+
+ // Make sure we receive delete events for both the file and the sub-directory
+ os.RemoveAll(testSubDir)
+ os.Remove(testFile1)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyRename(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if renameReceived.value() == 0 {
+ t.Fatal("fsnotify rename events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToCreate(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if createReceived.value() == 0 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Create the destination file up front so the rename below overwrites an existing file
+ var fr *os.File
+ fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fr.Sync()
+ fr.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var eventReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testFileRenamed) {
+ eventReceived.increment()
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if eventReceived.value() == 0 {
+ t.Fatal("fsnotify events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestRemovalOfWatch(t *testing.T) {
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+ if err := watcher.Remove(testDir); err != nil {
+ t.Fatalf("Could not remove the watch: %v\n", err)
+ }
+
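+ // After the watch has been removed, no events should be delivered for changes under testDir.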
+ go func() {
+ select {
+ case ev := <-watcher.Events:
+ t.Fatalf("We received event: %v\n", ev)
+ case <-time.After(500 * time.Millisecond):
+ t.Log("No event received, as expected.")
+ }
+ }()
+
+ time.Sleep(200 * time.Millisecond)
+ // Modify the file now that the watch has been removed; no events should be delivered
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ t.Fatalf("opening test file for writing failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+ if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+ time.Sleep(400 * time.Millisecond)
+}
+
+func TestFsnotifyAttrib(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("attributes don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ // The modifyReceived counter counts IsModify events that are not IsAttrib,
+ // and the attribReceived counts IsAttrib events (which are also IsModify as
+ // a consequence).
+ var modifyReceived counter
+ var attribReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Chmod == Chmod {
+ attribReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := os.Chmod(testFile, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ // Creating/writing a file also changes the mtime, so IsAttrib should be set to true here
+ time.Sleep(500 * time.Millisecond)
+ if modifyReceived.value() != 0 {
+ t.Fatal("received an unexpected modify event when creating a test file")
+ }
+ if attribReceived.value() == 0 {
+ t.Fatal("fsnotify attribute events have not received after 500 ms")
+ }
+
+ // Modifying the contents of the file does not set the attrib flag (although e.g. the mtime
+ // might have been modified).
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatalf("reopening test file failed: %s", err)
+ }
+
+ f.WriteString("more data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(500 * time.Millisecond)
+
+ if modifyReceived.value() != 1 {
+ t.Fatal("didn't receive a modify event after changing test file contents")
+ }
+
+ if attribReceived.value() != 0 {
+ t.Fatal("did receive an unexpected attrib event after changing test file contents")
+ }
+
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+ // of the file are not changed though)
+ if err := os.Chmod(testFile, 0600); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ time.Sleep(500 * time.Millisecond)
+
+ if attribReceived.value() != 1 {
+ t.Fatal("didn't receive an attribute change after 500ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(1 * time.Second):
+ t.Fatal("event stream was not closed after 1 second")
+ }
+
+ os.Remove(testFile)
+}
+
+func TestFsnotifyClose(t *testing.T) {
+ watcher := newWatcher(t)
+ watcher.Close()
+
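+ // A second Close on an already-closed watcher must return promptly; verify it does not block.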
+ var done int32
+ go func() {
+ watcher.Close()
+ atomic.StoreInt32(&done, 1)
+ }()
+
+ time.Sleep(50 * time.Millisecond)
+ if atomic.LoadInt32(&done) == 0 {
+ t.Fatal("double Close() test failed: second Close() call didn't return")
+ }
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ if err := watcher.Add(testDir); err == nil {
+ t.Fatal("expected error on Watch() after Close(), got nil")
+ }
+}
+
+func TestFsnotifyFakeSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ var errorsReceived counter
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Logf("Received error: %s", err)
+ errorsReceived.increment()
+ }
+ }()
+
+ // Count the CREATE events received
+ var createEventsReceived, otherEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ t.Logf("event received: %s", ev)
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ } else {
+ otherEventsReceived.increment()
+ }
+ }
+ }()
+
+ addWatch(t, watcher, testDir)
+
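+ // The symlink target "zzz" does not exist, so only the creation of the link itself should generate an event.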
+ if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+ t.Fatalf("Failed to create bogus symlink: %s", err)
+ }
+ t.Logf("Created bogus symlink")
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ // There should be no errors; a broken symlink simply produces no events (we are watching nothing)
+ if errorsReceived.value() > 0 {
+ t.Fatal("fsnotify errors have been received.")
+ }
+ if otherEventsReceived.value() > 0 {
+ t.Fatal("fsnotify other events received on the broken link")
+ }
+
+ // Except for 1 create event (for the link itself)
+ if createEventsReceived.value() == 0 {
+ t.Fatal("fsnotify create events were not received after 500 ms")
+ }
+ if createEventsReceived.value() > 1 {
+ t.Fatal("fsnotify more create events received than expected")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+}
+
+func TestCyclicSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ link := path.Join(testDir, "link")
+ if err := os.Symlink(".", link); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+ addWatch(t, watcher, testDir)
+
+ var createEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ }
+ }
+ }()
+
+ if err := os.Remove(link); err != nil {
+ t.Fatalf("Error removing link: %v", err)
+ }
+
+ // It would be nice to be able to expect a delete event here, but kqueue has
+ // no way for us to get events on symlinks themselves, because opening them
+ // opens an fd to the file to which they point.
+
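+ // Recreating the path as a regular file should still produce a create event on the watched directory.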
+ if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ if got := createEventsReceived.value(); got == 0 {
+ t.Errorf("want at least 1 create event got %v", got)
+ }
+
+ watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to Remove do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skip("regression test for race only present on darwin")
+ }
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Test that Remove can be invoked concurrently, with no data races.
+ removed1 := make(chan struct{})
+ go func() {
+ defer close(removed1)
+ watcher.Remove(testDir)
+ }()
+ removed2 := make(chan struct{})
+ go func() {
+ defer close(removed2)
+ watcher.Remove(testDir)
+ }()
+ <-removed1
+ <-removed2
+}
+
+func TestClose(t *testing.T) {
+ // Regression test for #59: bad file descriptor from Close
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ err := watcher.Close()
+ if err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
+// TestRemoveWithClose tests that one can handle Remove events and, at the same
+// time, close the Watcher object without any data races.
+func TestRemoveWithClose(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ const fileN = 200
+ tempFiles := make([]string, 0, fileN)
+ for i := 0; i < fileN; i++ {
+ tempFiles = append(tempFiles, tempMkFile(t, testDir))
+ }
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ startC, stopC := make(chan struct{}), make(chan struct{})
+ errC := make(chan error)
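+ // Closing startC releases the file-removal and Close goroutines together; stopC stops the event drainer; errC carries the result of Close.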
+ go func() {
+ for {
+ select {
+ case <-watcher.Errors:
+ case <-watcher.Events:
+ case <-stopC:
+ return
+ }
+ }
+ }()
+ go func() {
+ <-startC
+ for _, fileName := range tempFiles {
+ os.Remove(fileName)
+ }
+ }()
+ go func() {
+ <-startC
+ errC <- watcher.Close()
+ }()
+ close(startC)
+ defer close(stopC)
+ if err := <-errC; err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
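+// testRename renames file1 to file2, using an external mv process where available and falling back to os.Rename on Windows and Plan 9.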
+func testRename(file1, file2 string) error {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ return os.Rename(file1, file2)
+ default:
+ cmd := exec.Command("mv", file1, file2)
+ return cmd.Run()
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go
index c2b4acb18..86e76a3d6 100644
--- a/vendor/gopkg.in/fsnotify.v1/kqueue.go
+++ b/vendor/gopkg.in/fsnotify.v1/kqueue.go
@@ -22,7 +22,7 @@ import (
type Watcher struct {
Events chan Event
Errors chan error
- done chan bool // Channel for sending a "quit message" to the reader goroutine
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
@@ -56,7 +56,7 @@ func NewWatcher() (*Watcher, error) {
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
- done: make(chan bool),
+ done: make(chan struct{}),
}
go w.readEvents()
@@ -71,10 +71,8 @@ func (w *Watcher) Close() error {
return nil
}
w.isClosed = true
- w.mu.Unlock()
// copy paths to remove while locked
- w.mu.Lock()
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
@@ -82,15 +80,12 @@ func (w *Watcher) Close() error {
w.mu.Unlock()
// unlock before calling Remove, which also locks
- var err error
for _, name := range pathsToRemove {
- if e := w.Remove(name); e != nil && err == nil {
- err = e
- }
+ w.Remove(name)
}
- // Send "quit" message to the reader goroutine:
- w.done <- true
+ // send a "quit" message to the reader goroutine
+ close(w.done)
return nil
}
@@ -266,17 +261,12 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
+loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- close(w.Events)
- close(w.Errors)
- return
+ break loop
default:
}
@@ -284,7 +274,11 @@ func (w *Watcher) readEvents() {
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
continue
}
@@ -319,8 +313,12 @@ func (w *Watcher) readEvents() {
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
- // Send the event on the Events channel
- w.Events <- event
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
}
if event.Op&Remove == Remove {
@@ -352,6 +350,18 @@ func (w *Watcher) readEvents() {
kevents = kevents[1:]
}
}
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // The only way the previous loop exits is if w.done was closed, so a non-blocking send to w.Errors is required.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
@@ -407,7 +417,11 @@ func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
}
// Search for new files
@@ -428,7 +442,11 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
w.mu.Unlock()
if !doesExist {
// Send create event
- w.Events <- newCreateEvent(filePath)
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
}
// like watchDirectoryFiles (but without doing another ReadDir)