author    baude <bbaude@redhat.com>  2018-04-25 13:26:52 -0500
committer Atomic Bot <atomic-devel@projectatomic.io>  2018-04-27 20:51:07 +0000
commit    a824186ac9803ef5f7548df790988a4ebd2d9c07 (patch)
tree      63c64e9be4d9c44bd160dd974b740231497eabcd
parent    4e468ce83d69e9748e80eb98a6f5bd3c5114cc7d (diff)
Use buildah commit and bud in podman
Vendor in buildah and use as much of commit and bud as possible for podman build and commit.

Resolves #586
Signed-off-by: baude <bbaude@redhat.com>
Closes: #681
Approved by: mheon
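The heart of this change is cmd/podman/build.go, which stops exec'ing an external buildah binary and instead calls the vendored imagebuildah package in-process. A minimal sketch of the new flow, condensed from the diff below (the helper name is hypothetical, flag parsing and error handling are elided, and field values are illustrative):

    package main

    import (
        "context"

        "github.com/projectatomic/buildah/imagebuildah"
        "github.com/projectatomic/libpod/libpod"
    )

    // buildSketch condenses the new buildCmd in cmd/podman/build.go below.
    func buildSketch(runtime *libpod.Runtime, contextDir string, dockerfiles []string) error {
        options := imagebuildah.BuildOptions{
            ContextDirectory: contextDir,                    // positional arg, or an unpacked URL
            PullPolicy:       imagebuildah.PullIfMissing,    // --pull (PullAlways for --pull-always)
            Compression:      imagebuildah.Gzip,
            OutputFormat:     imagebuildah.OCIv1ImageFormat, // --format oci (the default)
        }
        // runtime.Build is the libpod entry point this commit wires up; it
        // drives the vendored buildah build against podman's own storage.
        return runtime.Build(context.Background(), options, dockerfiles...)
    }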
-rw-r--r--  .travis.yml | 4
-rw-r--r--  Dockerfile | 9
-rw-r--r--  Makefile | 11
-rw-r--r--  cmd/podman/build.go | 410
-rw-r--r--  cmd/podman/commit.go | 29
-rw-r--r--  cmd/podman/varlink/ioprojectatomicpodman.go | 966
-rw-r--r--  libpod/buildah/buildah.go | 233
-rw-r--r--  libpod/buildah/util.go | 67
-rw-r--r--  libpod/container_commit.go | 48
-rw-r--r--  libpod/image/image.go | 41
-rw-r--r--  libpod/runtime_img.go | 7
-rw-r--r--  pkg/inspect/inspect.go | 1
-rw-r--r--  test/e2e/commit_test.go | 2
-rw-r--r--  vendor.conf | 6
-rw-r--r--  vendor/github.com/Nvveen/Gotty/LICENSE | 26
-rw-r--r--  vendor/github.com/Nvveen/Gotty/README | 5
-rw-r--r--  vendor/github.com/Nvveen/Gotty/attributes.go | 514
-rw-r--r--  vendor/github.com/Nvveen/Gotty/gotty.go | 238
-rw-r--r--  vendor/github.com/Nvveen/Gotty/parser.go | 362
-rw-r--r--  vendor/github.com/Nvveen/Gotty/types.go | 23
-rw-r--r--  vendor/github.com/containerd/continuity/LICENSE | 202
-rw-r--r--  vendor/github.com/containerd/continuity/README.md | 74
-rw-r--r--  vendor/github.com/containerd/continuity/pathdriver/path_driver.go | 85
-rw-r--r--  vendor/github.com/containerd/continuity/vendor.conf | 13
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 56
-rw-r--r--  vendor/github.com/docker/docker/api/names.go | 9
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 2
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/host_config.go | 39
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/parse.go | 272
-rw-r--r--  vendor/github.com/docker/docker/api/types/mount/mount.go | 4
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/timestamp.go | 4
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/command/command.go | 46
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go | 399
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go | 363
-rw-r--r--  vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go | 118
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_list.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/client.go | 86
-rw-r--r--  vendor/github.com/docker/docker/client/config_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/config_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/config_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_inspect.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/container_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 245
-rw-r--r--  vendor/github.com/docker/docker/client/hijack.go | 5
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/image_remove.go | 4
-rw-r--r--  vendor/github.com/docker/docker/client/image_search.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/network_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/network_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/node_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/node_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/node_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/parse_logs.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 8
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/secret_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/secret_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/secret_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/service_inspect.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/service_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/service_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/task_inspect.go | 7
-rw-r--r--  vendor/github.com/docker/docker/client/task_list.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/tlsconfig_clone.go | 11
-rw-r--r--  vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go (renamed from vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go) | 6
-rw-r--r--  vendor/github.com/docker/docker/client/transport.go | 8
-rw-r--r--  vendor/github.com/docker/docker/client/utils.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/volume_inspect.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/volume_remove.go | 2
-rw-r--r--  vendor/github.com/docker/docker/hack/README.md | 60
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md | 69
-rw-r--r--  vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf | 2
-rw-r--r--  vendor/github.com/docker/docker/opts/env.go | 48
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts.go | 165
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_unix.go | 8
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/opts/ip.go | 47
-rw-r--r--  vendor/github.com/docker/docker/opts/opts.go | 327
-rw-r--r--  vendor/github.com/docker/docker/opts/opts_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/opts/opts_windows.go | 56
-rw-r--r--  vendor/github.com/docker/docker/opts/quotedstring.go | 37
-rw-r--r--  vendor/github.com/docker/docker/opts/runtime.go | 79
-rw-r--r--  vendor/github.com/docker/docker/opts/ulimit.go | 81
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go | 1237
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 92
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_other.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 122
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_windows.go | 79
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes.go | 441
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_linux.go | 313
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_other.go | 97
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_unix.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/changes_windows.go | 30
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy.go | 472
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_unix.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/copy_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/diff.go | 256
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_linux.go | 16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/time_unsupported.go | 16
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/whiteouts.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/wrap.go | 59
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils.go | 298
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go | 27
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go | 317
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go | 3
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/pools/pools.go | 137
-rw-r--r--  vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go | 190
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/events_windows.go | 85
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_unix.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_windows.go | 5
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_linux.go | 4
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/ascii.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term_windows.go | 31
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_linux.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/winsize.go | 18
-rw-r--r--  vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go | 11
-rw-r--r--  vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go | 2
-rw-r--r--  vendor/github.com/docker/docker/vendor.conf | 32
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE | 6
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/LICENSE | 22
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/README.markdown | 133
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/auth.go | 185
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/change.go | 43
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/client.go | 1092
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/client_unix.go | 32
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/client_windows.go | 45
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container.go | 1623
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/distribution.go | 26
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/env.go | 172
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/event.go | 410
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/exec.go | 213
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/image.go | 720
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/misc.go | 188
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/network.go | 321
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/plugin.go | 418
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/signal.go | 49
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm.go | 156
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm_configs.go | 171
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm_node.go | 130
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go | 171
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm_service.go | 213
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/swarm_task.go | 70
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/tar.go | 122
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/tls.go | 118
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/volume.go | 190
-rw-r--r--  vendor/github.com/openshift/imagebuilder/LICENSE | 192
-rw-r--r--  vendor/github.com/openshift/imagebuilder/README.md | 104
-rw-r--r--  vendor/github.com/openshift/imagebuilder/builder.go | 510
-rw-r--r--  vendor/github.com/openshift/imagebuilder/constants.go | 13
-rw-r--r--  vendor/github.com/openshift/imagebuilder/dispatchers.go | 570
-rw-r--r--  vendor/github.com/openshift/imagebuilder/doc.go | 6
-rw-r--r--  vendor/github.com/openshift/imagebuilder/evaluator.go | 160
-rw-r--r--  vendor/github.com/openshift/imagebuilder/internals.go | 83
-rw-r--r--  vendor/github.com/openshift/imagebuilder/shell_parser.go | 314
-rw-r--r--  vendor/github.com/openshift/imagebuilder/signal/README.md | 1
-rw-r--r--  vendor/github.com/openshift/imagebuilder/signal/signal.go | 25
-rw-r--r--  vendor/github.com/openshift/imagebuilder/signal/signals.go | 79
-rw-r--r--  vendor/github.com/openshift/imagebuilder/strslice/strslice.go | 30
-rw-r--r--  vendor/github.com/projectatomic/buildah/LICENSE | 201
-rw-r--r--  vendor/github.com/projectatomic/buildah/README.md | 79
-rw-r--r--  vendor/github.com/projectatomic/buildah/add.go | 253
-rw-r--r--  vendor/github.com/projectatomic/buildah/buildah.go | 359
-rw-r--r--  vendor/github.com/projectatomic/buildah/commit.go (renamed from libpod/buildah/commit.go) | 8
-rw-r--r--  vendor/github.com/projectatomic/buildah/common.go (renamed from libpod/buildah/common.go) | 0
-rw-r--r--  vendor/github.com/projectatomic/buildah/config.go (renamed from libpod/buildah/config.go) | 13
-rw-r--r--  vendor/github.com/projectatomic/buildah/delete.go | 18
-rw-r--r--  vendor/github.com/projectatomic/buildah/docker/types.go | 271
-rw-r--r--  vendor/github.com/projectatomic/buildah/image.go (renamed from libpod/buildah/image.go) | 4
-rw-r--r--  vendor/github.com/projectatomic/buildah/imagebuildah/build.go | 775
-rw-r--r--  vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go | 145
-rw-r--r--  vendor/github.com/projectatomic/buildah/imagebuildah/util.go | 96
-rw-r--r--  vendor/github.com/projectatomic/buildah/import.go | 123
-rw-r--r--  vendor/github.com/projectatomic/buildah/mount.go | 17
-rw-r--r--  vendor/github.com/projectatomic/buildah/new.go | 313
-rw-r--r--  vendor/github.com/projectatomic/buildah/pkg/cli/common.go | 130
-rw-r--r--  vendor/github.com/projectatomic/buildah/pkg/parse/parse.go | 323
-rw-r--r--  vendor/github.com/projectatomic/buildah/pull.go | 186
-rw-r--r--  vendor/github.com/projectatomic/buildah/run.go | 479
-rw-r--r--  vendor/github.com/projectatomic/buildah/secrets.go | 198
-rw-r--r--  vendor/github.com/projectatomic/buildah/unmount.go | 11
-rw-r--r--  vendor/github.com/projectatomic/buildah/util.go | 34
-rw-r--r--  vendor/github.com/projectatomic/buildah/util/util.go | 221
-rw-r--r--  vendor/github.com/projectatomic/buildah/vendor.conf | 57
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/seccomp.go | 80
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go | 176
199 files changed, 22536 insertions, 2010 deletions
diff --git a/.travis.yml b/.travis.yml
index 138a9c9f9..93a23a06e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,7 @@ services:
before_install:
- sudo apt-get -qq update
- - sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev libapparmor-dev libseccomp-dev
+ - sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev libapparmor-dev
- sudo apt-get -qq install autoconf automake bison e2fslibs-dev libfuse-dev libtool liblzma-dev gettext
install:
@@ -30,12 +30,10 @@ jobs:
go: 1.9.x
- script:
- make testunit
- - make
go: 1.8.x
- stage: Build and Verify
script:
- make testunit
- - make
go: 1.9.x
- stage: Integration Test
script:
diff --git a/Dockerfile b/Dockerfile
index 30bf36915..d25fe0258 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,5 @@
FROM golang:1.8
-# libseccomp in jessie is not _quite_ new enough -- need backports version
RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
RUN apt-get update && apt-get install -y \
@@ -22,8 +21,6 @@ RUN apt-get update && apt-get install -y \
libostree-dev \
libprotobuf-dev \
libprotobuf-c0-dev \
- libseccomp2/jessie-backports \
- libseccomp-dev/jessie-backports \
libtool \
libudev-dev \
protobuf-c-compiler \
@@ -41,6 +38,10 @@ RUN apt-get update && apt-get install -y \
--no-install-recommends \
&& apt-get clean
+ADD . /go/src/github.com/projectatomic/libpod
+
+RUN set -x && cd /go/src/github.com/projectatomic/libpod && make install.libseccomp
+
# install criu
ENV CRIU_VERSION 1.7
RUN mkdir -p /usr/src/criu \
@@ -117,5 +118,3 @@ COPY test/policy.json /etc/containers/policy.json
COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml
WORKDIR /go/src/github.com/projectatomic/libpod
-
-ADD . /go/src/github.com/projectatomic/libpod
diff --git a/Makefile b/Makefile
index 1fa92492f..13948db02 100644
--- a/Makefile
+++ b/Makefile
@@ -29,6 +29,7 @@ COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
BUILD_INFO := $(shell date +%s)
ISODATE := $(shell date --iso-8601)
+LIBSECCOMP_COMMIT := release-2.3
# If GOPATH not specified, use one in the local directory
ifeq ($(GOPATH),)
@@ -249,6 +250,13 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man
varlink_generate: .gopathok cmd/podman/varlink/ioprojectatomicpodman.go
+.PHONY: install.libseccomp
+install.libseccomp:
+ rm -rf ../../seccomp/libseccomp
+ git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
+ cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && make install
+
+
cmd/podman/varlink/ioprojectatomicpodman.go: cmd/podman/varlink/io.projectatomic.podman.varlink
$(GO) generate ./cmd/podman/varlink/...
@@ -267,4 +275,5 @@ validate: gofmt .gitvalidation
uninstall \
shell \
changelog \
- validate
+ validate \
+ install.libseccomp
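Usage note: with the target above in place, running make install.libseccomp clones libseccomp at the release-2.3 tag into ../../seccomp/libseccomp (relative to the checkout), then builds and installs it under /usr. This is the same target the updated Dockerfile invokes in place of the dropped jessie-backports libseccomp packages.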
diff --git a/cmd/podman/build.go b/cmd/podman/build.go
index 92823f72b..180b43cbc 100644
--- a/cmd/podman/build.go
+++ b/cmd/podman/build.go
@@ -1,270 +1,176 @@
package main
import (
- "fmt"
"os"
- "os/exec"
- "strconv"
+ "path/filepath"
+ "strings"
"github.com/pkg/errors"
+ "github.com/projectatomic/buildah/imagebuildah"
+ buildahcli "github.com/projectatomic/buildah/pkg/cli"
+ "github.com/projectatomic/buildah/pkg/parse"
+ "github.com/projectatomic/libpod/cmd/podman/libpodruntime"
+ "github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
- buildFlags = []cli.Flag{
- // The following flags are emulated from:
- // src/github.com/projectatomic/buildah/cmd/bud.go
- cli.StringFlag{
- Name: "authfile",
- Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
- },
- cli.StringSliceFlag{
- Name: "build-arg",
- Usage: "`argument=value` to supply to the builder",
- },
- cli.StringFlag{
- Name: "cert-dir",
- Value: "",
- Usage: "use certificates at the specified path to access the registry",
- },
- cli.StringFlag{
- Name: "creds",
- Value: "",
- Usage: "use `[username[:password]]` for accessing the registry",
- },
- cli.StringSliceFlag{
- Name: "file, f",
- Usage: "`pathname or URL` of a Dockerfile",
- },
- cli.StringFlag{
- Name: "format",
- Usage: "`format` of the built image's manifest and metadata",
- },
- cli.BoolFlag{
- Name: "pull-always",
- Usage: "pull the image, even if a version is present",
- },
- cli.BoolFlag{
- Name: "quiet, q",
- Usage: "refrain from announcing build instructions and image read/write progress",
- },
- cli.StringFlag{
- Name: "runtime",
- Usage: "`path` to an alternate runtime",
- },
- cli.StringSliceFlag{
- Name: "runtime-flag",
- Usage: "add global flags for the container runtime",
- },
- cli.StringFlag{
- Name: "signature-policy",
- Usage: "`pathname` of signature policy file (not usually used)",
- },
- cli.StringSliceFlag{
- Name: "tag, t",
- Usage: "`tag` to apply to the built image",
- },
- cli.BoolFlag{
- Name: "tls-verify",
- Usage: "require HTTPS and verify certificates when accessing the registry",
- },
- // The following flags are emulated from:
- // src/github.com/projectatomic/buildah/cmd/common.go fromAndBudFlags
- cli.StringSliceFlag{
- Name: "add-host",
- Usage: "add a custom host-to-IP mapping (host:ip) (default [])",
- },
- cli.StringFlag{
- Name: "cgroup-parent",
- Usage: "optional parent cgroup for the container",
- },
- cli.Uint64Flag{
- Name: "cpu-period",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) period",
- },
- cli.Int64Flag{
- Name: "cpu-quota",
- Usage: "limit the CPU CFS (Completely Fair Scheduler) quota",
- },
- cli.Uint64Flag{
- Name: "cpu-shares",
- Usage: "CPU shares (relative weight)",
- },
- cli.StringFlag{
- Name: "cpuset-cpus",
- Usage: "CPUs in which to allow execution (0-3, 0,1)",
- },
- cli.StringFlag{
- Name: "cpuset-mems",
- Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
- },
- cli.StringFlag{
- Name: "memory, m",
- Usage: "memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
- },
- cli.StringFlag{
- Name: "memory-swap",
- Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap",
- },
- cli.StringSliceFlag{
- Name: "security-opt",
- Usage: "security Options (default [])",
- },
- cli.StringFlag{
- Name: "shm-size",
- Usage: "size of `/dev/shm`. The format is `<number><unit>`.",
- Value: "65536k",
- },
- cli.StringSliceFlag{
- Name: "ulimit",
- Usage: "ulimit options (default [])",
- },
- cli.StringSliceFlag{
- Name: "volume, v",
- Usage: "bind mount a volume into the container (default [])",
- },
- }
buildDescription = "podman build launches the Buildah command to build an OCI Image. Buildah must be installed for this command to work."
buildCommand = cli.Command{
- Name: "build",
- Usage: "Build an image using instructions in a Dockerfile",
- Description: buildDescription,
- Flags: buildFlags,
- Action: buildCmd,
- ArgsUsage: "CONTEXT-DIRECTORY | URL",
+ Name: "build",
+ Usage: "Build an image using instructions in a Dockerfile",
+ Description: buildDescription,
+ Flags: append(buildahcli.BudFlags, buildahcli.FromAndBudFlags...),
+ Action: buildCmd,
+ ArgsUsage: "CONTEXT-DIRECTORY | URL",
+ SkipArgReorder: true,
}
)
func buildCmd(c *cli.Context) error {
-
- budCmdArgs := []string{}
-
- // Handle Global Options
- logLevel := c.GlobalString("log-level")
- if logLevel == "debug" {
- budCmdArgs = append(budCmdArgs, "--debug")
- }
- if c.GlobalIsSet("root") {
- budCmdArgs = append(budCmdArgs, "--root", c.GlobalString("root"))
- }
- if c.GlobalIsSet("runroot") {
- budCmdArgs = append(budCmdArgs, "--runroot", c.GlobalString("runroot"))
- }
- if c.GlobalIsSet("storage-driver") {
- budCmdArgs = append(budCmdArgs, "--storage-driver", c.GlobalString("storage-driver"))
- }
- for _, storageOpt := range c.GlobalStringSlice("storage-opt") {
- budCmdArgs = append(budCmdArgs, "--storage-opt", storageOpt)
- }
-
- budCmdArgs = append(budCmdArgs, "bud")
-
- // Buildah bud specific options
- if c.IsSet("authfile") {
- budCmdArgs = append(budCmdArgs, "--authfile", c.String("authfile"))
- }
- for _, buildArg := range c.StringSlice("build-arg") {
- budCmdArgs = append(budCmdArgs, "--build-arg", buildArg)
- }
- if c.IsSet("cert-dir") {
- budCmdArgs = append(budCmdArgs, "--cert-dir", c.String("cert-dir"))
- }
- if c.IsSet("creds") {
- budCmdArgs = append(budCmdArgs, "--creds", c.String("creds"))
- }
- for _, fileName := range c.StringSlice("file") {
- budCmdArgs = append(budCmdArgs, "--file", fileName)
- }
+ // The following was taken directly from projectatomic/buildah/cmd/bud.go
+	// TODO Find a way to vendor more of this in rather than copy from bud
+ output := ""
+ tags := []string{}
+ if c.IsSet("tag") || c.IsSet("t") {
+ tags = c.StringSlice("tag")
+ if len(tags) > 0 {
+ output = tags[0]
+ tags = tags[1:]
+ }
+ }
+ pullPolicy := imagebuildah.PullNever
+ if c.BoolT("pull") {
+ pullPolicy = imagebuildah.PullIfMissing
+ }
+ if c.Bool("pull-always") {
+ pullPolicy = imagebuildah.PullAlways
+ }
+
+ args := make(map[string]string)
+ if c.IsSet("build-arg") {
+ for _, arg := range c.StringSlice("build-arg") {
+ av := strings.SplitN(arg, "=", 2)
+ if len(av) > 1 {
+ args[av[0]] = av[1]
+ } else {
+ delete(args, av[0])
+ }
+ }
+ }
+
+ dockerfiles := c.StringSlice("file")
+ format := "oci"
if c.IsSet("format") {
- budCmdArgs = append(budCmdArgs, "--format", c.String("format"))
- }
- if c.IsSet("pull-always") {
- budCmdArgs = append(budCmdArgs, "--pull-always")
- }
- if c.IsSet("quiet") {
- quietParam := "--quiet=" + strconv.FormatBool(c.Bool("quiet"))
- budCmdArgs = append(budCmdArgs, quietParam)
- }
- if c.IsSet("runtime") {
- budCmdArgs = append(budCmdArgs, "--runtime", c.String("runtime"))
- }
- for _, runtimeArg := range c.StringSlice("runtime-flag") {
- budCmdArgs = append(budCmdArgs, "--runtime-flag", runtimeArg)
- }
- if c.IsSet("signature-policy") {
- budCmdArgs = append(budCmdArgs, "--signature-policy", c.String("signature-policy"))
- }
- for _, tagArg := range c.StringSlice("tag") {
- budCmdArgs = append(budCmdArgs, "--tag", tagArg)
- }
- if c.IsSet("tls-verify") {
- tlsParam := "--tls-verify=" + strconv.FormatBool(c.Bool("tls-verify"))
- budCmdArgs = append(budCmdArgs, tlsParam)
- }
-
- // Buildah bud and from options from cmd/buildah/common.go
- for _, addHostArg := range c.StringSlice("add-host") {
- budCmdArgs = append(budCmdArgs, "--add-host", addHostArg)
- }
- if c.IsSet("cgroup-parent") {
- budCmdArgs = append(budCmdArgs, "--cgroup-parent", c.String("cgroup-parent"))
- }
- if c.IsSet("cpu-period") {
- budCmdArgs = append(budCmdArgs, "--cpu-period", fmt.Sprintf("%v", c.Int64("cpu-period")))
- }
- if c.IsSet("cpu-quota") {
- budCmdArgs = append(budCmdArgs, "--cpu-quota", fmt.Sprintf("%v", c.Uint64("cpu-quota")))
- }
- if c.IsSet("cpu-shares") {
- budCmdArgs = append(budCmdArgs, "--cpu-shares", fmt.Sprintf("%v", c.Uint64("cpu-shares")))
- }
- if c.IsSet("cpuset-cpus") {
- budCmdArgs = append(budCmdArgs, "--cpuset-cpus", c.String("cpuset-cpus"))
- }
- if c.IsSet("cpuset-mems") {
- budCmdArgs = append(budCmdArgs, "--cpuset-mems", c.String("cpuset-mems"))
- }
- if c.IsSet("memory") {
- budCmdArgs = append(budCmdArgs, "--memory", c.String("memory"))
- }
- if c.IsSet("memory-swap") {
- budCmdArgs = append(budCmdArgs, "--memory-swap", c.String("memory-swap"))
- }
- for _, securityOptArg := range c.StringSlice("security-opt") {
- budCmdArgs = append(budCmdArgs, "--security-opt", securityOptArg)
- }
- if c.IsSet("shm-size") {
- budCmdArgs = append(budCmdArgs, "--shm-size", c.String("shm-size"))
- }
- for _, ulimitArg := range c.StringSlice("ulimit") {
- budCmdArgs = append(budCmdArgs, "--ulimit", ulimitArg)
- }
- for _, volumeArg := range c.StringSlice("volume") {
- budCmdArgs = append(budCmdArgs, "--volume", volumeArg)
- }
-
- if len(c.Args()) > 0 {
- budCmdArgs = append(budCmdArgs, c.Args()...)
- }
-
- buildah := "buildah"
-
- if _, err := exec.LookPath(buildah); err != nil {
- return errors.Wrapf(err, "buildah not found in PATH")
- }
- if _, err := exec.Command(buildah).Output(); err != nil {
- return errors.Wrapf(err, "buildah is not operational on this server")
- }
-
- cmd := exec.Command(buildah, budCmdArgs...)
-
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
-
- if err := cmd.Run(); err != nil {
- return errors.Wrapf(err, "error running the buildah build-using-dockerfile (bud) command")
- }
-
- return nil
+ format = strings.ToLower(c.String("format"))
+ }
+ if strings.HasPrefix(format, "oci") {
+ format = imagebuildah.OCIv1ImageFormat
+ } else if strings.HasPrefix(format, "docker") {
+ format = imagebuildah.Dockerv2ImageFormat
+ } else {
+ return errors.Errorf("unrecognized image type %q", format)
+ }
+ contextDir := ""
+ cliArgs := c.Args()
+ if len(cliArgs) > 0 {
+ // The context directory could be a URL. Try to handle that.
+ tempDir, subDir, err := imagebuildah.TempDirForURL("", "buildah", cliArgs[0])
+ if err != nil {
+ return errors.Wrapf(err, "error prepping temporary context directory")
+ }
+ if tempDir != "" {
+ // We had to download it to a temporary directory.
+ // Delete it later.
+ defer func() {
+ if err = os.RemoveAll(tempDir); err != nil {
+ logrus.Errorf("error removing temporary directory %q: %v", contextDir, err)
+ }
+ }()
+ contextDir = filepath.Join(tempDir, subDir)
+ } else {
+ // Nope, it was local. Use it as is.
+ absDir, err := filepath.Abs(cliArgs[0])
+ if err != nil {
+ return errors.Wrapf(err, "error determining path to directory %q", cliArgs[0])
+ }
+ contextDir = absDir
+ }
+ cliArgs = cliArgs.Tail()
+ } else {
+ // No context directory or URL was specified. Try to use the
+ // home of the first locally-available Dockerfile.
+ for i := range dockerfiles {
+ if strings.HasPrefix(dockerfiles[i], "http://") ||
+ strings.HasPrefix(dockerfiles[i], "https://") ||
+ strings.HasPrefix(dockerfiles[i], "git://") ||
+ strings.HasPrefix(dockerfiles[i], "github.com/") {
+ continue
+ }
+ absFile, err := filepath.Abs(dockerfiles[i])
+ if err != nil {
+ return errors.Wrapf(err, "error determining path to file %q", dockerfiles[i])
+ }
+ contextDir = filepath.Dir(absFile)
+ dockerfiles[i], err = filepath.Rel(contextDir, absFile)
+ if err != nil {
+ return errors.Wrapf(err, "error determining path to file %q", dockerfiles[i])
+ }
+ break
+ }
+ }
+ if contextDir == "" {
+ return errors.Errorf("no context directory specified, and no dockerfile specified")
+ }
+ if len(dockerfiles) == 0 {
+ dockerfiles = append(dockerfiles, filepath.Join(contextDir, "Dockerfile"))
+ }
+ if err := parse.ValidateFlags(c, buildahcli.BudFlags); err != nil {
+ return err
+ }
+
+ runtimeFlags := []string{}
+ for _, arg := range c.StringSlice("runtime-flag") {
+ runtimeFlags = append(runtimeFlags, "--"+arg)
+ }
+ // end from buildah
+
+ runtime, err := libpodruntime.GetRuntime(c)
+ if err != nil {
+ return errors.Wrapf(err, "could not get runtime")
+ }
+ defer runtime.Shutdown(false)
+
+ systemContext, err := parse.SystemContextFromOptions(c)
+ if err != nil {
+ return errors.Wrapf(err, "error building system context")
+ }
+
+ commonOpts, err := parse.ParseCommonBuildOptions(c)
+ if err != nil {
+ return err
+ }
+
+ options := imagebuildah.BuildOptions{
+ ContextDirectory: contextDir,
+ PullPolicy: pullPolicy,
+ Compression: imagebuildah.Gzip,
+ Quiet: c.Bool("quiet"),
+ SignaturePolicyPath: c.String("signature-policy"),
+ Args: args,
+ Output: output,
+ AdditionalTags: tags,
+ Runtime: c.String("runtime"),
+ RuntimeArgs: runtimeFlags,
+ OutputFormat: format,
+ SystemContext: systemContext,
+ CommonBuildOpts: commonOpts,
+ DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
+ }
+
+ if !c.Bool("quiet") {
+ options.ReportWriter = os.Stderr
+ }
+
+ return runtime.Build(getContext(), options, dockerfiles...)
}
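The net effect of this hunk: the removed code located a buildah binary on PATH, rebuilt its command line flag by flag, and ran buildah bud as a child process; the replacement fills imagebuildah.BuildOptions and runs the build in-process through runtime.Build, so the build shares podman's storage, signature policy, and runtime configuration rather than those of a separately installed tool.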
diff --git a/cmd/podman/commit.go b/cmd/podman/commit.go
index c721c8700..14b7ddace 100644
--- a/cmd/podman/commit.go
+++ b/cmd/podman/commit.go
@@ -7,9 +7,9 @@ import (
"strings"
"github.com/pkg/errors"
+ "github.com/projectatomic/buildah"
"github.com/projectatomic/libpod/cmd/podman/libpodruntime"
"github.com/projectatomic/libpod/libpod"
- "github.com/projectatomic/libpod/libpod/buildah"
"github.com/projectatomic/libpod/libpod/image"
"github.com/projectatomic/libpod/pkg/util"
"github.com/urfave/cli"
@@ -22,6 +22,11 @@ var (
Usage: "Apply the following possible instructions to the created image (default []): CMD | ENTRYPOINT | ENV | EXPOSE | LABEL | STOPSIGNAL | USER | VOLUME | WORKDIR",
},
cli.StringFlag{
+ Name: "format, f",
+ Usage: "`format` of the image manifest and metadata",
+ Value: "oci",
+ },
+ cli.StringFlag{
Name: "message, m",
Usage: "Set commit message for imported image",
},
@@ -63,12 +68,25 @@ func commitCmd(c *cli.Context) error {
defer runtime.Shutdown(false)
var (
- writer io.Writer
+ writer io.Writer
+ mimeType string
)
args := c.Args()
if len(args) != 2 {
return errors.Errorf("you must provide a container name or ID and a target image name")
}
+
+ switch c.String("format") {
+ case "oci":
+ mimeType = buildah.OCIv1ImageManifest
+ if c.IsSet("message") {
+ return errors.Errorf("messages cannot be added to the OCIv1 image format.")
+ }
+ case "docker":
+ mimeType = buildah.Dockerv2ImageManifest
+ default:
+ return errors.Errorf("unrecognized image format %q", c.String("format"))
+ }
container := args[0]
reference := args[1]
if c.IsSet("change") {
@@ -90,9 +108,10 @@ func commitCmd(c *cli.Context) error {
sc := image.GetSystemContext(runtime.GetConfig().SignaturePolicyPath, "", false)
coptions := buildah.CommitOptions{
- SignaturePolicyPath: runtime.GetConfig().SignaturePolicyPath,
- ReportWriter: writer,
- SystemContext: sc,
+ SignaturePolicyPath: runtime.GetConfig().SignaturePolicyPath,
+ ReportWriter: writer,
+ SystemContext: sc,
+ PreferredManifestType: mimeType,
}
options := libpod.ContainerCommitOptions{
CommitOptions: coptions,
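On the commit side, the new --format flag selects which manifest MIME type buildah is asked to produce. A minimal sketch of just that mapping, lifted from the hunk above (the function name is hypothetical; the constants come from the vendored buildah package):

    package main

    import (
        "github.com/pkg/errors"
        "github.com/projectatomic/buildah"
    )

    // manifestTypeFor mirrors the --format switch added to commitCmd above.
    func manifestTypeFor(format string) (string, error) {
        switch format {
        case "oci":
            // Note: OCI images cannot carry a commit message, hence the
            // extra --message check in the hunk above.
            return buildah.OCIv1ImageManifest, nil
        case "docker":
            return buildah.Dockerv2ImageManifest, nil
        default:
            return "", errors.Errorf("unrecognized image format %q", format)
        }
    }

The resulting MIME type is passed to buildah as CommitOptions.PreferredManifestType, as the last hunk of this file shows.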
diff --git a/cmd/podman/varlink/ioprojectatomicpodman.go b/cmd/podman/varlink/ioprojectatomicpodman.go
index 41e2b7f29..028af3e57 100644
--- a/cmd/podman/varlink/ioprojectatomicpodman.go
+++ b/cmd/podman/varlink/ioprojectatomicpodman.go
@@ -4,14 +4,6 @@ package ioprojectatomicpodman
import "github.com/varlink/go/varlink"
// Type declarations
-type Version struct {
- Version string `json:"version"`
- Go_version string `json:"go_version"`
- Git_commit string `json:"git_commit"`
- Built int64 `json:"built"`
- Os_arch string `json:"os_arch"`
-}
-
type NotImplemented struct {
Comment string `json:"comment"`
}
@@ -49,30 +41,42 @@ type ImageSearch struct {
Star_count int64 `json:"star_count"`
}
+type Version struct {
+ Version string `json:"version"`
+ Go_version string `json:"go_version"`
+ Git_commit string `json:"git_commit"`
+ Built int64 `json:"built"`
+ Os_arch string `json:"os_arch"`
+}
+
// Client method calls and reply readers
-func InspectContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.InspectContainer", nil, more__, oneway__)
+func PullImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string) error {
+ var in struct {
+ Name string `json:"name"`
+ }
+ in.Name = name_
+ return c__.Send("io.projectatomic.podman.PullImage", in, more__, oneway__)
}
-func ReadInspectContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadPullImage_(c__ *varlink.Connection, id_ *string) (bool, error) {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Id string `json:"id"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if notimplemented_ != nil {
- *notimplemented_ = out.Notimplemented
+ if id_ != nil {
+ *id_ = out.Id
}
return continues_, nil
}
-func ListContainerChanges(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.ListContainerChanges", nil, more__, oneway__)
+func AttachToContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.AttachToContainer", nil, more__, oneway__)
}
-func ReadListContainerChanges_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadAttachToContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -86,11 +90,29 @@ func ReadListContainerChanges_(c__ *varlink.Connection, notimplemented_ *NotImpl
return continues_, nil
}
-func GetContainerStats(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.GetContainerStats", nil, more__, oneway__)
+func ListImages(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.ListImages", nil, more__, oneway__)
}
-func ReadGetContainerStats_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadListImages_(c__ *varlink.Connection, images_ *[]ImageInList) (bool, error) {
+ var out struct {
+ Images []ImageInList `json:"images"`
+ }
+ continues_, err := c__.Receive(&out)
+ if err != nil {
+ return false, err
+ }
+ if images_ != nil {
+ *images_ = []ImageInList(out.Images)
+ }
+ return continues_, nil
+}
+
+func CreateImage(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.CreateImage", nil, more__, oneway__)
+}
+
+func ReadCreateImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -126,57 +148,49 @@ func ReadInspectImage_(c__ *varlink.Connection, image_ *string) (bool, error) {
return continues_, nil
}
-func DeleteUnusedImages(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.DeleteUnusedImages", nil, more__, oneway__)
+func PushImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, tag_ string, tlsverify_ bool) error {
+ var in struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Tlsverify bool `json:"tlsverify"`
+ }
+ in.Name = name_
+ in.Tag = tag_
+ in.Tlsverify = tlsverify_
+ return c__.Send("io.projectatomic.podman.PushImage", in, more__, oneway__)
}
-func ReadDeleteUnusedImages_(c__ *varlink.Connection, images_ *[]string) (bool, error) {
- var out struct {
- Images []string `json:"images"`
- }
- continues_, err := c__.Receive(&out)
+func ReadPushImage_(c__ *varlink.Connection) (bool, error) {
+ continues_, err := c__.Receive(nil)
if err != nil {
return false, err
}
- if images_ != nil {
- *images_ = []string(out.Images)
- }
return continues_, nil
}
-func ImportImage(c__ *varlink.Connection, more__ bool, oneway__ bool, source_ string, reference_ string, message_ string, changes_ []string) error {
+func TagImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, tagged_ string) error {
var in struct {
- Source string `json:"source"`
- Reference string `json:"reference"`
- Message string `json:"message"`
- Changes []string `json:"changes"`
+ Name string `json:"name"`
+ Tagged string `json:"tagged"`
}
- in.Source = source_
- in.Reference = reference_
- in.Message = message_
- in.Changes = []string(changes_)
- return c__.Send("io.projectatomic.podman.ImportImage", in, more__, oneway__)
+ in.Name = name_
+ in.Tagged = tagged_
+ return c__.Send("io.projectatomic.podman.TagImage", in, more__, oneway__)
}
-func ReadImportImage_(c__ *varlink.Connection, id_ *string) (bool, error) {
- var out struct {
- Id string `json:"id"`
- }
- continues_, err := c__.Receive(&out)
+func ReadTagImage_(c__ *varlink.Connection) (bool, error) {
+ continues_, err := c__.Receive(nil)
if err != nil {
return false, err
}
- if id_ != nil {
- *id_ = out.Id
- }
return continues_, nil
}
-func ListContainers(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.ListContainers", nil, more__, oneway__)
+func ListContainerChanges(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.ListContainerChanges", nil, more__, oneway__)
}
-func ReadListContainers_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadListContainerChanges_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -190,11 +204,11 @@ func ReadListContainers_(c__ *varlink.Connection, notimplemented_ *NotImplemente
return continues_, nil
}
-func AttachToContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.AttachToContainer", nil, more__, oneway__)
+func PauseContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.PauseContainer", nil, more__, oneway__)
}
-func ReadAttachToContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadPauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -208,11 +222,11 @@ func ReadAttachToContainer_(c__ *varlink.Connection, notimplemented_ *NotImpleme
return continues_, nil
}
-func RemoveContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.RemoveContainer", nil, more__, oneway__)
+func BuildImage(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.BuildImage", nil, more__, oneway__)
}
-func ReadRemoveContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadBuildImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -226,11 +240,11 @@ func ReadRemoveContainer_(c__ *varlink.Connection, notimplemented_ *NotImplement
return continues_, nil
}
-func KillContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.KillContainer", nil, more__, oneway__)
+func InspectContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.InspectContainer", nil, more__, oneway__)
}
-func ReadKillContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadInspectContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -244,11 +258,11 @@ func ReadKillContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented
return continues_, nil
}
-func ResizeContainerTty(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.ResizeContainerTty", nil, more__, oneway__)
+func GetContainerStats(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.GetContainerStats", nil, more__, oneway__)
}
-func ReadResizeContainerTty_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadGetContainerStats_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -262,11 +276,11 @@ func ReadResizeContainerTty_(c__ *varlink.Connection, notimplemented_ *NotImplem
return continues_, nil
}
-func StartContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.StartContainer", nil, more__, oneway__)
+func RestartContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.RestartContainer", nil, more__, oneway__)
}
-func ReadStartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadRestartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -280,11 +294,11 @@ func ReadStartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemente
return continues_, nil
}
-func StopContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.StopContainer", nil, more__, oneway__)
+func KillContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.KillContainer", nil, more__, oneway__)
}
-func ReadStopContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadKillContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -298,11 +312,11 @@ func ReadStopContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented
return continues_, nil
}
-func UnpauseContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.UnpauseContainer", nil, more__, oneway__)
+func WaitContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.WaitContainer", nil, more__, oneway__)
}
-func ReadUnpauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadWaitContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -316,20 +330,30 @@ func ReadUnpauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemen
return continues_, nil
}
-func GetVersion(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.GetVersion", nil, more__, oneway__)
+func ImportImage(c__ *varlink.Connection, more__ bool, oneway__ bool, source_ string, reference_ string, message_ string, changes_ []string) error {
+ var in struct {
+ Source string `json:"source"`
+ Reference string `json:"reference"`
+ Message string `json:"message"`
+ Changes []string `json:"changes"`
+ }
+ in.Source = source_
+ in.Reference = reference_
+ in.Message = message_
+ in.Changes = []string(changes_)
+ return c__.Send("io.projectatomic.podman.ImportImage", in, more__, oneway__)
}
-func ReadGetVersion_(c__ *varlink.Connection, version_ *Version) (bool, error) {
+func ReadImportImage_(c__ *varlink.Connection, id_ *string) (bool, error) {
var out struct {
- Version Version `json:"version"`
+ Id string `json:"id"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if version_ != nil {
- *version_ = out.Version
+ if id_ != nil {
+ *id_ = out.Id
}
return continues_, nil
}
@@ -352,29 +376,11 @@ func ReadPing_(c__ *varlink.Connection, ping_ *StringResponse) (bool, error) {
return continues_, nil
}
-func WaitContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.WaitContainer", nil, more__, oneway__)
-}
-
-func ReadWaitContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
- var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
- }
- continues_, err := c__.Receive(&out)
- if err != nil {
- return false, err
- }
- if notimplemented_ != nil {
- *notimplemented_ = out.Notimplemented
- }
- return continues_, nil
-}
-
-func BuildImage(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.BuildImage", nil, more__, oneway__)
+func CreateContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.CreateContainer", nil, more__, oneway__)
}
-func ReadBuildImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadCreateContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -388,66 +394,6 @@ func ReadBuildImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (
return continues_, nil
}
-func HistoryImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string) error {
- var in struct {
- Name string `json:"name"`
- }
- in.Name = name_
- return c__.Send("io.projectatomic.podman.HistoryImage", in, more__, oneway__)
-}
-
-func ReadHistoryImage_(c__ *varlink.Connection, history_ *[]ImageHistory) (bool, error) {
- var out struct {
- History []ImageHistory `json:"history"`
- }
- continues_, err := c__.Receive(&out)
- if err != nil {
- return false, err
- }
- if history_ != nil {
- *history_ = []ImageHistory(out.History)
- }
- return continues_, nil
-}
-
-func PushImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, tag_ string, tlsverify_ bool) error {
- var in struct {
- Name string `json:"name"`
- Tag string `json:"tag"`
- Tlsverify bool `json:"tlsverify"`
- }
- in.Name = name_
- in.Tag = tag_
- in.Tlsverify = tlsverify_
- return c__.Send("io.projectatomic.podman.PushImage", in, more__, oneway__)
-}
-
-func ReadPushImage_(c__ *varlink.Connection) (bool, error) {
- continues_, err := c__.Receive(nil)
- if err != nil {
- return false, err
- }
- return continues_, nil
-}
-
-func TagImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, tagged_ string) error {
- var in struct {
- Name string `json:"name"`
- Tagged string `json:"tagged"`
- }
- in.Name = name_
- in.Tagged = tagged_
- return c__.Send("io.projectatomic.podman.TagImage", in, more__, oneway__)
-}
-
-func ReadTagImage_(c__ *varlink.Connection) (bool, error) {
- continues_, err := c__.Receive(nil)
- if err != nil {
- return false, err
- }
- return continues_, nil
-}
-
func SearchImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, limit_ int64) error {
var in struct {
Name string `json:"name"`
@@ -492,11 +438,11 @@ func ReadExportImage_(c__ *varlink.Connection) (bool, error) {
return continues_, nil
}
-func RenameContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.RenameContainer", nil, more__, oneway__)
+func ExportContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.ExportContainer", nil, more__, oneway__)
}
-func ReadRenameContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadExportContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -510,29 +456,29 @@ func ReadRenameContainer_(c__ *varlink.Connection, notimplemented_ *NotImplement
return continues_, nil
}
-func RemoveImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, force_ bool) error {
- var in struct {
- Name string `json:"name"`
- Force bool `json:"force"`
- }
- in.Name = name_
- in.Force = force_
- return c__.Send("io.projectatomic.podman.RemoveImage", in, more__, oneway__)
+func RemoveContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.RemoveContainer", nil, more__, oneway__)
}
-func ReadRemoveImage_(c__ *varlink.Connection) (bool, error) {
- continues_, err := c__.Receive(nil)
+func ReadRemoveContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+ var out struct {
+ Notimplemented NotImplemented `json:"notimplemented"`
+ }
+ continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
+ if notimplemented_ != nil {
+ *notimplemented_ = out.Notimplemented
+ }
return continues_, nil
}
-func ExportContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.ExportContainer", nil, more__, oneway__)
+func CreateFromContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.CreateFromContainer", nil, more__, oneway__)
}
-func ReadExportContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadCreateFromContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -546,11 +492,11 @@ func ReadExportContainer_(c__ *varlink.Connection, notimplemented_ *NotImplement
return continues_, nil
}
-func RestartContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.RestartContainer", nil, more__, oneway__)
+func StopContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.StopContainer", nil, more__, oneway__)
}
-func ReadRestartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadStopContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -564,29 +510,29 @@ func ReadRestartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemen
return continues_, nil
}
-func UpdateContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.UpdateContainer", nil, more__, oneway__)
+func DeleteUnusedImages(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.DeleteUnusedImages", nil, more__, oneway__)
}
-func ReadUpdateContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadDeleteUnusedImages_(c__ *varlink.Connection, images_ *[]string) (bool, error) {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Images []string `json:"images"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if notimplemented_ != nil {
- *notimplemented_ = out.Notimplemented
+ if images_ != nil {
+ *images_ = []string(out.Images)
}
return continues_, nil
}
-func PauseContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.PauseContainer", nil, more__, oneway__)
+func GetContainerLogs(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.GetContainerLogs", nil, more__, oneway__)
}
-func ReadPauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadGetContainerLogs_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -600,11 +546,11 @@ func ReadPauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemente
return continues_, nil
}
-func DeleteStoppedContainers(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.DeleteStoppedContainers", nil, more__, oneway__)
+func UpdateContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.UpdateContainer", nil, more__, oneway__)
}
-func ReadDeleteStoppedContainers_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadUpdateContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -618,11 +564,11 @@ func ReadDeleteStoppedContainers_(c__ *varlink.Connection, notimplemented_ *NotI
return continues_, nil
}
-func CreateImage(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.CreateImage", nil, more__, oneway__)
+func RenameContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.RenameContainer", nil, more__, oneway__)
}
-func ReadCreateImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadRenameContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -636,11 +582,11 @@ func ReadCreateImage_(c__ *varlink.Connection, notimplemented_ *NotImplemented)
return continues_, nil
}
-func CreateFromContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.CreateFromContainer", nil, more__, oneway__)
+func UnpauseContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.UnpauseContainer", nil, more__, oneway__)
}
-func ReadCreateFromContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadUnpauseContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -654,42 +600,60 @@ func ReadCreateFromContainer_(c__ *varlink.Connection, notimplemented_ *NotImple
return continues_, nil
}
-func PullImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string) error {
+func HistoryImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string) error {
var in struct {
Name string `json:"name"`
}
in.Name = name_
- return c__.Send("io.projectatomic.podman.PullImage", in, more__, oneway__)
+ return c__.Send("io.projectatomic.podman.HistoryImage", in, more__, oneway__)
}
-func ReadPullImage_(c__ *varlink.Connection, id_ *string) (bool, error) {
+func ReadHistoryImage_(c__ *varlink.Connection, history_ *[]ImageHistory) (bool, error) {
var out struct {
- Id string `json:"id"`
+ History []ImageHistory `json:"history"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if id_ != nil {
- *id_ = out.Id
+ if history_ != nil {
+ *history_ = []ImageHistory(out.History)
}
return continues_, nil
}
-func GetContainerLogs(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.GetContainerLogs", nil, more__, oneway__)
+func RemoveImage(c__ *varlink.Connection, more__ bool, oneway__ bool, name_ string, force_ bool) error {
+ var in struct {
+ Name string `json:"name"`
+ Force bool `json:"force"`
+ }
+ in.Name = name_
+ in.Force = force_
+ return c__.Send("io.projectatomic.podman.RemoveImage", in, more__, oneway__)
}
-func ReadGetContainerLogs_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadRemoveImage_(c__ *varlink.Connection) (bool, error) {
+ continues_, err := c__.Receive(nil)
+ if err != nil {
+ return false, err
+ }
+ return continues_, nil
+}
+
+func GetVersion(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.GetVersion", nil, more__, oneway__)
+}
+
+func ReadGetVersion_(c__ *varlink.Connection, version_ *Version) (bool, error) {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Version Version `json:"version"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if notimplemented_ != nil {
- *notimplemented_ = out.Notimplemented
+ if version_ != nil {
+ *version_ = out.Version
}
return continues_, nil
}
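
Editor's note: the generated client helpers above always come in pairs, a Send wrapper and a matching Read function. Below is a minimal sketch of a one-shot call, written as if it lived inside this generated package; the NewConnection/Close calls and the socket address are assumptions about the varlink Go client, not something this diff establishes.

func exampleGetVersion() (Version, error) {
	var ver Version
	// Address is illustrative; adjust to wherever the service listens.
	conn, err := varlink.NewConnection("unix:/run/io.projectatomic.podman")
	if err != nil {
		return ver, err
	}
	defer conn.Close()
	// Send the call (not streamed, not oneway), then read the single reply.
	if err := GetVersion(conn, false, false); err != nil {
		return ver, err
	}
	if _, err := ReadGetVersion_(conn, &ver); err != nil {
		return ver, err
	}
	return ver, nil
}
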
@@ -712,29 +676,65 @@ func ReadListContainerProcesses_(c__ *varlink.Connection, notimplemented_ *NotIm
return continues_, nil
}
-func ListImages(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.ListImages", nil, more__, oneway__)
+func StartContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.StartContainer", nil, more__, oneway__)
}
-func ReadListImages_(c__ *varlink.Connection, images_ *[]ImageInList) (bool, error) {
+func ReadStartContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
- Images []ImageInList `json:"images"`
+ Notimplemented NotImplemented `json:"notimplemented"`
}
continues_, err := c__.Receive(&out)
if err != nil {
return false, err
}
- if images_ != nil {
- *images_ = []ImageInList(out.Images)
+ if notimplemented_ != nil {
+ *notimplemented_ = out.Notimplemented
}
return continues_, nil
}
-func CreateContainer(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
- return c__.Send("io.projectatomic.podman.CreateContainer", nil, more__, oneway__)
+func DeleteStoppedContainers(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.DeleteStoppedContainers", nil, more__, oneway__)
}
-func ReadCreateContainer_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+func ReadDeleteStoppedContainers_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+ var out struct {
+ Notimplemented NotImplemented `json:"notimplemented"`
+ }
+ continues_, err := c__.Receive(&out)
+ if err != nil {
+ return false, err
+ }
+ if notimplemented_ != nil {
+ *notimplemented_ = out.Notimplemented
+ }
+ return continues_, nil
+}
+
+func ListContainers(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.ListContainers", nil, more__, oneway__)
+}
+
+func ReadListContainers_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
+ var out struct {
+ Notimplemented NotImplemented `json:"notimplemented"`
+ }
+ continues_, err := c__.Receive(&out)
+ if err != nil {
+ return false, err
+ }
+ if notimplemented_ != nil {
+ *notimplemented_ = out.Notimplemented
+ }
+ return continues_, nil
+}
+
+func ResizeContainerTty(c__ *varlink.Connection, more__ bool, oneway__ bool) error {
+ return c__.Send("io.projectatomic.podman.ResizeContainerTty", nil, more__, oneway__)
+}
+
+func ReadResizeContainerTty_(c__ *varlink.Connection, notimplemented_ *NotImplemented) (bool, error) {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -750,43 +750,43 @@ func ReadCreateContainer_(c__ *varlink.Connection, notimplemented_ *NotImplement
// Service interface with all methods
type ioprojectatomicpodmanInterface interface {
- PauseContainer(c__ VarlinkCall) error
- DeleteStoppedContainers(c__ VarlinkCall) error
- CreateImage(c__ VarlinkCall) error
- CreateFromContainer(c__ VarlinkCall) error
- PullImage(c__ VarlinkCall, name_ string) error
- GetContainerLogs(c__ VarlinkCall) error
+ BuildImage(c__ VarlinkCall) error
RestartContainer(c__ VarlinkCall) error
- UpdateContainer(c__ VarlinkCall) error
- CreateContainer(c__ VarlinkCall) error
- ListContainerProcesses(c__ VarlinkCall) error
- ListImages(c__ VarlinkCall) error
- GetContainerStats(c__ VarlinkCall) error
- InspectImage(c__ VarlinkCall, name_ string) error
- DeleteUnusedImages(c__ VarlinkCall) error
+ KillContainer(c__ VarlinkCall) error
+ WaitContainer(c__ VarlinkCall) error
ImportImage(c__ VarlinkCall, source_ string, reference_ string, message_ string, changes_ []string) error
- ListContainers(c__ VarlinkCall) error
+ Ping(c__ VarlinkCall) error
+ CreateContainer(c__ VarlinkCall) error
InspectContainer(c__ VarlinkCall) error
- ListContainerChanges(c__ VarlinkCall) error
- KillContainer(c__ VarlinkCall) error
- AttachToContainer(c__ VarlinkCall) error
+ GetContainerStats(c__ VarlinkCall) error
+ ExportContainer(c__ VarlinkCall) error
RemoveContainer(c__ VarlinkCall) error
+ SearchImage(c__ VarlinkCall, name_ string, limit_ int64) error
+ ExportImage(c__ VarlinkCall, name_ string, destination_ string, compress_ bool) error
StopContainer(c__ VarlinkCall) error
+ DeleteUnusedImages(c__ VarlinkCall) error
+ CreateFromContainer(c__ VarlinkCall) error
+ RenameContainer(c__ VarlinkCall) error
UnpauseContainer(c__ VarlinkCall) error
+ HistoryImage(c__ VarlinkCall, name_ string) error
+ RemoveImage(c__ VarlinkCall, name_ string, force_ bool) error
GetVersion(c__ VarlinkCall) error
+ ListContainerProcesses(c__ VarlinkCall) error
+ GetContainerLogs(c__ VarlinkCall) error
+ UpdateContainer(c__ VarlinkCall) error
+ ListContainers(c__ VarlinkCall) error
ResizeContainerTty(c__ VarlinkCall) error
StartContainer(c__ VarlinkCall) error
- Ping(c__ VarlinkCall) error
- HistoryImage(c__ VarlinkCall, name_ string) error
+ DeleteStoppedContainers(c__ VarlinkCall) error
+ AttachToContainer(c__ VarlinkCall) error
+ ListImages(c__ VarlinkCall) error
+ PullImage(c__ VarlinkCall, name_ string) error
PushImage(c__ VarlinkCall, name_ string, tag_ string, tlsverify_ bool) error
TagImage(c__ VarlinkCall, name_ string, tagged_ string) error
- SearchImage(c__ VarlinkCall, name_ string, limit_ int64) error
- ExportImage(c__ VarlinkCall, name_ string, destination_ string, compress_ bool) error
- RenameContainer(c__ VarlinkCall) error
- WaitContainer(c__ VarlinkCall) error
- BuildImage(c__ VarlinkCall) error
- ExportContainer(c__ VarlinkCall) error
- RemoveImage(c__ VarlinkCall, name_ string, force_ bool) error
+ ListContainerChanges(c__ VarlinkCall) error
+ PauseContainer(c__ VarlinkCall) error
+ CreateImage(c__ VarlinkCall) error
+ InspectImage(c__ VarlinkCall, name_ string) error
}
// Service object with all methods
@@ -826,11 +826,11 @@ func (c__ *VarlinkCall) ReplyRuntimeError(reason_ string) error {
}
// Reply methods for all varlink methods
-func (c__ *VarlinkCall) ReplyGetVersion(version_ Version) error {
+func (c__ *VarlinkCall) ReplyListContainers(notimplemented_ NotImplemented) error {
var out struct {
- Version Version `json:"version"`
+ Notimplemented NotImplemented `json:"notimplemented"`
}
- out.Version = version_
+ out.Notimplemented = notimplemented_
return c__.Reply(&out)
}
@@ -850,7 +850,7 @@ func (c__ *VarlinkCall) ReplyStartContainer(notimplemented_ NotImplemented) erro
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyStopContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyDeleteStoppedContainers(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -858,7 +858,7 @@ func (c__ *VarlinkCall) ReplyStopContainer(notimplemented_ NotImplemented) error
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyUnpauseContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyAttachToContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -866,27 +866,31 @@ func (c__ *VarlinkCall) ReplyUnpauseContainer(notimplemented_ NotImplemented) er
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyPing(ping_ StringResponse) error {
+func (c__ *VarlinkCall) ReplyListImages(images_ []ImageInList) error {
var out struct {
- Ping StringResponse `json:"ping"`
+ Images []ImageInList `json:"images"`
}
- out.Ping = ping_
+ out.Images = []ImageInList(images_)
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplySearchImage(images_ []ImageSearch) error {
+func (c__ *VarlinkCall) ReplyPullImage(id_ string) error {
var out struct {
- Images []ImageSearch `json:"images"`
+ Id string `json:"id"`
}
- out.Images = []ImageSearch(images_)
+ out.Id = id_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyExportImage() error {
+func (c__ *VarlinkCall) ReplyPushImage() error {
return c__.Reply(nil)
}
-func (c__ *VarlinkCall) ReplyRenameContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyTagImage() error {
+ return c__.Reply(nil)
+}
+
+func (c__ *VarlinkCall) ReplyListContainerChanges(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -894,7 +898,7 @@ func (c__ *VarlinkCall) ReplyRenameContainer(notimplemented_ NotImplemented) err
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyWaitContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyPauseContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -902,7 +906,7 @@ func (c__ *VarlinkCall) ReplyWaitContainer(notimplemented_ NotImplemented) error
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyBuildImage(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyCreateImage(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -910,23 +914,15 @@ func (c__ *VarlinkCall) ReplyBuildImage(notimplemented_ NotImplemented) error {
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyHistoryImage(history_ []ImageHistory) error {
+func (c__ *VarlinkCall) ReplyInspectImage(image_ string) error {
var out struct {
- History []ImageHistory `json:"history"`
+ Image string `json:"image"`
}
- out.History = []ImageHistory(history_)
+ out.Image = image_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyPushImage() error {
- return c__.Reply(nil)
-}
-
-func (c__ *VarlinkCall) ReplyTagImage() error {
- return c__.Reply(nil)
-}
-
-func (c__ *VarlinkCall) ReplyExportContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyBuildImage(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -934,11 +930,7 @@ func (c__ *VarlinkCall) ReplyExportContainer(notimplemented_ NotImplemented) err
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyRemoveImage() error {
- return c__.Reply(nil)
-}
-
-func (c__ *VarlinkCall) ReplyCreateFromContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyRestartContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -946,15 +938,15 @@ func (c__ *VarlinkCall) ReplyCreateFromContainer(notimplemented_ NotImplemented)
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyPullImage(id_ string) error {
+func (c__ *VarlinkCall) ReplyKillContainer(notimplemented_ NotImplemented) error {
var out struct {
- Id string `json:"id"`
+ Notimplemented NotImplemented `json:"notimplemented"`
}
- out.Id = id_
+ out.Notimplemented = notimplemented_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyGetContainerLogs(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyWaitContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -962,23 +954,23 @@ func (c__ *VarlinkCall) ReplyGetContainerLogs(notimplemented_ NotImplemented) er
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyRestartContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyImportImage(id_ string) error {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Id string `json:"id"`
}
- out.Notimplemented = notimplemented_
+ out.Id = id_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyUpdateContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyPing(ping_ StringResponse) error {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Ping StringResponse `json:"ping"`
}
- out.Notimplemented = notimplemented_
+ out.Ping = ping_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyPauseContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyCreateContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -986,7 +978,7 @@ func (c__ *VarlinkCall) ReplyPauseContainer(notimplemented_ NotImplemented) erro
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyDeleteStoppedContainers(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyInspectContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -994,7 +986,7 @@ func (c__ *VarlinkCall) ReplyDeleteStoppedContainers(notimplemented_ NotImplemen
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyCreateImage(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyGetContainerStats(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1002,7 +994,7 @@ func (c__ *VarlinkCall) ReplyCreateImage(notimplemented_ NotImplemented) error {
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyCreateContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyExportContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1010,7 +1002,7 @@ func (c__ *VarlinkCall) ReplyCreateContainer(notimplemented_ NotImplemented) err
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyListContainerProcesses(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyRemoveContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1018,31 +1010,35 @@ func (c__ *VarlinkCall) ReplyListContainerProcesses(notimplemented_ NotImplement
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyListImages(images_ []ImageInList) error {
+func (c__ *VarlinkCall) ReplySearchImage(images_ []ImageSearch) error {
var out struct {
- Images []ImageInList `json:"images"`
+ Images []ImageSearch `json:"images"`
}
- out.Images = []ImageInList(images_)
+ out.Images = []ImageSearch(images_)
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyImportImage(id_ string) error {
+func (c__ *VarlinkCall) ReplyExportImage() error {
+ return c__.Reply(nil)
+}
+
+func (c__ *VarlinkCall) ReplyStopContainer(notimplemented_ NotImplemented) error {
var out struct {
- Id string `json:"id"`
+ Notimplemented NotImplemented `json:"notimplemented"`
}
- out.Id = id_
+ out.Notimplemented = notimplemented_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyListContainers(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyDeleteUnusedImages(images_ []string) error {
var out struct {
- Notimplemented NotImplemented `json:"notimplemented"`
+ Images []string `json:"images"`
}
- out.Notimplemented = notimplemented_
+ out.Images = []string(images_)
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyInspectContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyCreateFromContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1050,7 +1046,7 @@ func (c__ *VarlinkCall) ReplyInspectContainer(notimplemented_ NotImplemented) er
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyListContainerChanges(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyRenameContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1058,7 +1054,7 @@ func (c__ *VarlinkCall) ReplyListContainerChanges(notimplemented_ NotImplemented
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyGetContainerStats(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyUnpauseContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1066,23 +1062,27 @@ func (c__ *VarlinkCall) ReplyGetContainerStats(notimplemented_ NotImplemented) e
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyInspectImage(image_ string) error {
+func (c__ *VarlinkCall) ReplyHistoryImage(history_ []ImageHistory) error {
var out struct {
- Image string `json:"image"`
+ History []ImageHistory `json:"history"`
}
- out.Image = image_
+ out.History = []ImageHistory(history_)
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyDeleteUnusedImages(images_ []string) error {
+func (c__ *VarlinkCall) ReplyRemoveImage() error {
+ return c__.Reply(nil)
+}
+
+func (c__ *VarlinkCall) ReplyGetVersion(version_ Version) error {
var out struct {
- Images []string `json:"images"`
+ Version Version `json:"version"`
}
- out.Images = []string(images_)
+ out.Version = version_
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyKillContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyListContainerProcesses(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1090,7 +1090,7 @@ func (c__ *VarlinkCall) ReplyKillContainer(notimplemented_ NotImplemented) error
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyAttachToContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyGetContainerLogs(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1098,7 +1098,7 @@ func (c__ *VarlinkCall) ReplyAttachToContainer(notimplemented_ NotImplemented) e
return c__.Reply(&out)
}
-func (c__ *VarlinkCall) ReplyRemoveContainer(notimplemented_ NotImplemented) error {
+func (c__ *VarlinkCall) ReplyUpdateContainer(notimplemented_ NotImplemented) error {
var out struct {
Notimplemented NotImplemented `json:"notimplemented"`
}
@@ -1107,36 +1107,44 @@ func (c__ *VarlinkCall) ReplyRemoveContainer(notimplemented_ NotImplemented) err
}
// Dummy methods for all varlink methods
-func (s__ *VarlinkInterface) UnpauseContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("UnpauseContainer")
+func (s__ *VarlinkInterface) KillContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("KillContainer")
}
-func (s__ *VarlinkInterface) GetVersion(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("GetVersion")
+func (s__ *VarlinkInterface) WaitContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("WaitContainer")
}
-func (s__ *VarlinkInterface) ResizeContainerTty(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("ResizeContainerTty")
+func (s__ *VarlinkInterface) ImportImage(c__ VarlinkCall, source_ string, reference_ string, message_ string, changes_ []string) error {
+ return c__.ReplyMethodNotImplemented("ImportImage")
}
-func (s__ *VarlinkInterface) StartContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("StartContainer")
+func (s__ *VarlinkInterface) Ping(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("Ping")
}
-func (s__ *VarlinkInterface) StopContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("StopContainer")
+func (s__ *VarlinkInterface) CreateContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("CreateContainer")
}
-func (s__ *VarlinkInterface) Ping(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("Ping")
+func (s__ *VarlinkInterface) InspectContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("InspectContainer")
}
-func (s__ *VarlinkInterface) PushImage(c__ VarlinkCall, name_ string, tag_ string, tlsverify_ bool) error {
- return c__.ReplyMethodNotImplemented("PushImage")
+func (s__ *VarlinkInterface) GetContainerStats(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("GetContainerStats")
}
-func (s__ *VarlinkInterface) TagImage(c__ VarlinkCall, name_ string, tagged_ string) error {
- return c__.ReplyMethodNotImplemented("TagImage")
+func (s__ *VarlinkInterface) RestartContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("RestartContainer")
+}
+
+func (s__ *VarlinkInterface) ExportContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("ExportContainer")
+}
+
+func (s__ *VarlinkInterface) RemoveContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("RemoveContainer")
}
func (s__ *VarlinkInterface) SearchImage(c__ VarlinkCall, name_ string, limit_ int64) error {
@@ -1147,230 +1155,252 @@ func (s__ *VarlinkInterface) ExportImage(c__ VarlinkCall, name_ string, destinat
return c__.ReplyMethodNotImplemented("ExportImage")
}
-func (s__ *VarlinkInterface) RenameContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("RenameContainer")
+func (s__ *VarlinkInterface) StopContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("StopContainer")
}
-func (s__ *VarlinkInterface) WaitContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("WaitContainer")
+func (s__ *VarlinkInterface) DeleteUnusedImages(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("DeleteUnusedImages")
}
-func (s__ *VarlinkInterface) BuildImage(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("BuildImage")
+func (s__ *VarlinkInterface) CreateFromContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("CreateFromContainer")
}
-func (s__ *VarlinkInterface) HistoryImage(c__ VarlinkCall, name_ string) error {
- return c__.ReplyMethodNotImplemented("HistoryImage")
+func (s__ *VarlinkInterface) UnpauseContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("UnpauseContainer")
}
-func (s__ *VarlinkInterface) ExportContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("ExportContainer")
+func (s__ *VarlinkInterface) HistoryImage(c__ VarlinkCall, name_ string) error {
+ return c__.ReplyMethodNotImplemented("HistoryImage")
}
func (s__ *VarlinkInterface) RemoveImage(c__ VarlinkCall, name_ string, force_ bool) error {
return c__.ReplyMethodNotImplemented("RemoveImage")
}
-func (s__ *VarlinkInterface) DeleteStoppedContainers(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("DeleteStoppedContainers")
-}
-
-func (s__ *VarlinkInterface) CreateImage(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("CreateImage")
-}
-
-func (s__ *VarlinkInterface) CreateFromContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("CreateFromContainer")
+func (s__ *VarlinkInterface) GetVersion(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("GetVersion")
}
-func (s__ *VarlinkInterface) PullImage(c__ VarlinkCall, name_ string) error {
- return c__.ReplyMethodNotImplemented("PullImage")
+func (s__ *VarlinkInterface) ListContainerProcesses(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("ListContainerProcesses")
}
func (s__ *VarlinkInterface) GetContainerLogs(c__ VarlinkCall) error {
return c__.ReplyMethodNotImplemented("GetContainerLogs")
}
-func (s__ *VarlinkInterface) RestartContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("RestartContainer")
-}
-
func (s__ *VarlinkInterface) UpdateContainer(c__ VarlinkCall) error {
return c__.ReplyMethodNotImplemented("UpdateContainer")
}
-func (s__ *VarlinkInterface) PauseContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("PauseContainer")
+func (s__ *VarlinkInterface) RenameContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("RenameContainer")
}
-func (s__ *VarlinkInterface) CreateContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("CreateContainer")
+func (s__ *VarlinkInterface) ListContainers(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("ListContainers")
}
-func (s__ *VarlinkInterface) ListContainerProcesses(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("ListContainerProcesses")
+func (s__ *VarlinkInterface) ResizeContainerTty(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("ResizeContainerTty")
}
-func (s__ *VarlinkInterface) ListImages(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("ListImages")
+func (s__ *VarlinkInterface) StartContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("StartContainer")
}
-func (s__ *VarlinkInterface) InspectImage(c__ VarlinkCall, name_ string) error {
- return c__.ReplyMethodNotImplemented("InspectImage")
+func (s__ *VarlinkInterface) DeleteStoppedContainers(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("DeleteStoppedContainers")
}
-func (s__ *VarlinkInterface) DeleteUnusedImages(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("DeleteUnusedImages")
+func (s__ *VarlinkInterface) AttachToContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("AttachToContainer")
}
-func (s__ *VarlinkInterface) ImportImage(c__ VarlinkCall, source_ string, reference_ string, message_ string, changes_ []string) error {
- return c__.ReplyMethodNotImplemented("ImportImage")
+func (s__ *VarlinkInterface) ListImages(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("ListImages")
}
-func (s__ *VarlinkInterface) ListContainers(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("ListContainers")
+func (s__ *VarlinkInterface) PullImage(c__ VarlinkCall, name_ string) error {
+ return c__.ReplyMethodNotImplemented("PullImage")
}
-func (s__ *VarlinkInterface) InspectContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("InspectContainer")
+func (s__ *VarlinkInterface) TagImage(c__ VarlinkCall, name_ string, tagged_ string) error {
+ return c__.ReplyMethodNotImplemented("TagImage")
}
func (s__ *VarlinkInterface) ListContainerChanges(c__ VarlinkCall) error {
return c__.ReplyMethodNotImplemented("ListContainerChanges")
}
-func (s__ *VarlinkInterface) GetContainerStats(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("GetContainerStats")
+func (s__ *VarlinkInterface) PauseContainer(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("PauseContainer")
}
-func (s__ *VarlinkInterface) KillContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("KillContainer")
+func (s__ *VarlinkInterface) CreateImage(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("CreateImage")
}
-func (s__ *VarlinkInterface) AttachToContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("AttachToContainer")
+func (s__ *VarlinkInterface) InspectImage(c__ VarlinkCall, name_ string) error {
+ return c__.ReplyMethodNotImplemented("InspectImage")
}
-func (s__ *VarlinkInterface) RemoveContainer(c__ VarlinkCall) error {
- return c__.ReplyMethodNotImplemented("RemoveContainer")
+func (s__ *VarlinkInterface) PushImage(c__ VarlinkCall, name_ string, tag_ string, tlsverify_ bool) error {
+ return c__.ReplyMethodNotImplemented("PushImage")
+}
+
+func (s__ *VarlinkInterface) BuildImage(c__ VarlinkCall) error {
+ return c__.ReplyMethodNotImplemented("BuildImage")
}
// Method call dispatcher
func (s__ *VarlinkInterface) VarlinkDispatch(call varlink.Call, methodname string) error {
switch methodname {
- case "KillContainer":
- return s__.ioprojectatomicpodmanInterface.KillContainer(VarlinkCall{call})
-
case "AttachToContainer":
return s__.ioprojectatomicpodmanInterface.AttachToContainer(VarlinkCall{call})
- case "RemoveContainer":
- return s__.ioprojectatomicpodmanInterface.RemoveContainer(VarlinkCall{call})
-
- case "GetVersion":
- return s__.ioprojectatomicpodmanInterface.GetVersion(VarlinkCall{call})
-
- case "ResizeContainerTty":
- return s__.ioprojectatomicpodmanInterface.ResizeContainerTty(VarlinkCall{call})
-
- case "StartContainer":
- return s__.ioprojectatomicpodmanInterface.StartContainer(VarlinkCall{call})
-
- case "StopContainer":
- return s__.ioprojectatomicpodmanInterface.StopContainer(VarlinkCall{call})
-
- case "UnpauseContainer":
- return s__.ioprojectatomicpodmanInterface.UnpauseContainer(VarlinkCall{call})
+ case "ListImages":
+ return s__.ioprojectatomicpodmanInterface.ListImages(VarlinkCall{call})
- case "Ping":
- return s__.ioprojectatomicpodmanInterface.Ping(VarlinkCall{call})
+ case "PullImage":
+ var in struct {
+ Name string `json:"name"`
+ }
+ err := call.GetParameters(&in)
+ if err != nil {
+ return call.ReplyInvalidParameter("parameters")
+ }
+ return s__.ioprojectatomicpodmanInterface.PullImage(VarlinkCall{call}, in.Name)
- case "TagImage":
+ case "InspectImage":
var in struct {
- Name string `json:"name"`
- Tagged string `json:"tagged"`
+ Name string `json:"name"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.TagImage(VarlinkCall{call}, in.Name, in.Tagged)
+ return s__.ioprojectatomicpodmanInterface.InspectImage(VarlinkCall{call}, in.Name)
- case "SearchImage":
+ case "PushImage":
var in struct {
- Name string `json:"name"`
- Limit int64 `json:"limit"`
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Tlsverify bool `json:"tlsverify"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.SearchImage(VarlinkCall{call}, in.Name, in.Limit)
+ return s__.ioprojectatomicpodmanInterface.PushImage(VarlinkCall{call}, in.Name, in.Tag, in.Tlsverify)
- case "ExportImage":
+ case "TagImage":
var in struct {
- Name string `json:"name"`
- Destination string `json:"destination"`
- Compress bool `json:"compress"`
+ Name string `json:"name"`
+ Tagged string `json:"tagged"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.ExportImage(VarlinkCall{call}, in.Name, in.Destination, in.Compress)
+ return s__.ioprojectatomicpodmanInterface.TagImage(VarlinkCall{call}, in.Name, in.Tagged)
- case "RenameContainer":
- return s__.ioprojectatomicpodmanInterface.RenameContainer(VarlinkCall{call})
+ case "ListContainerChanges":
+ return s__.ioprojectatomicpodmanInterface.ListContainerChanges(VarlinkCall{call})
- case "WaitContainer":
- return s__.ioprojectatomicpodmanInterface.WaitContainer(VarlinkCall{call})
+ case "PauseContainer":
+ return s__.ioprojectatomicpodmanInterface.PauseContainer(VarlinkCall{call})
+
+ case "CreateImage":
+ return s__.ioprojectatomicpodmanInterface.CreateImage(VarlinkCall{call})
case "BuildImage":
return s__.ioprojectatomicpodmanInterface.BuildImage(VarlinkCall{call})
- case "HistoryImage":
+ case "GetContainerStats":
+ return s__.ioprojectatomicpodmanInterface.GetContainerStats(VarlinkCall{call})
+
+ case "RestartContainer":
+ return s__.ioprojectatomicpodmanInterface.RestartContainer(VarlinkCall{call})
+
+ case "KillContainer":
+ return s__.ioprojectatomicpodmanInterface.KillContainer(VarlinkCall{call})
+
+ case "WaitContainer":
+ return s__.ioprojectatomicpodmanInterface.WaitContainer(VarlinkCall{call})
+
+ case "ImportImage":
var in struct {
- Name string `json:"name"`
+ Source string `json:"source"`
+ Reference string `json:"reference"`
+ Message string `json:"message"`
+ Changes []string `json:"changes"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.HistoryImage(VarlinkCall{call}, in.Name)
+ return s__.ioprojectatomicpodmanInterface.ImportImage(VarlinkCall{call}, in.Source, in.Reference, in.Message, []string(in.Changes))
- case "PushImage":
+ case "Ping":
+ return s__.ioprojectatomicpodmanInterface.Ping(VarlinkCall{call})
+
+ case "CreateContainer":
+ return s__.ioprojectatomicpodmanInterface.CreateContainer(VarlinkCall{call})
+
+ case "InspectContainer":
+ return s__.ioprojectatomicpodmanInterface.InspectContainer(VarlinkCall{call})
+
+ case "ExportImage":
var in struct {
- Name string `json:"name"`
- Tag string `json:"tag"`
- Tlsverify bool `json:"tlsverify"`
+ Name string `json:"name"`
+ Destination string `json:"destination"`
+ Compress bool `json:"compress"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.PushImage(VarlinkCall{call}, in.Name, in.Tag, in.Tlsverify)
+ return s__.ioprojectatomicpodmanInterface.ExportImage(VarlinkCall{call}, in.Name, in.Destination, in.Compress)
case "ExportContainer":
return s__.ioprojectatomicpodmanInterface.ExportContainer(VarlinkCall{call})
- case "RemoveImage":
+ case "RemoveContainer":
+ return s__.ioprojectatomicpodmanInterface.RemoveContainer(VarlinkCall{call})
+
+ case "SearchImage":
var in struct {
Name string `json:"name"`
- Force bool `json:"force"`
+ Limit int64 `json:"limit"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.RemoveImage(VarlinkCall{call}, in.Name, in.Force)
+ return s__.ioprojectatomicpodmanInterface.SearchImage(VarlinkCall{call}, in.Name, in.Limit)
- case "CreateImage":
- return s__.ioprojectatomicpodmanInterface.CreateImage(VarlinkCall{call})
+ case "StopContainer":
+ return s__.ioprojectatomicpodmanInterface.StopContainer(VarlinkCall{call})
+
+ case "DeleteUnusedImages":
+ return s__.ioprojectatomicpodmanInterface.DeleteUnusedImages(VarlinkCall{call})
case "CreateFromContainer":
return s__.ioprojectatomicpodmanInterface.CreateFromContainer(VarlinkCall{call})
- case "PullImage":
+ case "UpdateContainer":
+ return s__.ioprojectatomicpodmanInterface.UpdateContainer(VarlinkCall{call})
+
+ case "RenameContainer":
+ return s__.ioprojectatomicpodmanInterface.RenameContainer(VarlinkCall{call})
+
+ case "UnpauseContainer":
+ return s__.ioprojectatomicpodmanInterface.UnpauseContainer(VarlinkCall{call})
+
+ case "HistoryImage":
var in struct {
Name string `json:"name"`
}
@@ -1378,69 +1408,39 @@ func (s__ *VarlinkInterface) VarlinkDispatch(call varlink.Call, methodname strin
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.PullImage(VarlinkCall{call}, in.Name)
-
- case "GetContainerLogs":
- return s__.ioprojectatomicpodmanInterface.GetContainerLogs(VarlinkCall{call})
-
- case "RestartContainer":
- return s__.ioprojectatomicpodmanInterface.RestartContainer(VarlinkCall{call})
-
- case "UpdateContainer":
- return s__.ioprojectatomicpodmanInterface.UpdateContainer(VarlinkCall{call})
-
- case "PauseContainer":
- return s__.ioprojectatomicpodmanInterface.PauseContainer(VarlinkCall{call})
-
- case "DeleteStoppedContainers":
- return s__.ioprojectatomicpodmanInterface.DeleteStoppedContainers(VarlinkCall{call})
-
- case "CreateContainer":
- return s__.ioprojectatomicpodmanInterface.CreateContainer(VarlinkCall{call})
-
- case "ListContainerProcesses":
- return s__.ioprojectatomicpodmanInterface.ListContainerProcesses(VarlinkCall{call})
-
- case "ListImages":
- return s__.ioprojectatomicpodmanInterface.ListImages(VarlinkCall{call})
-
- case "DeleteUnusedImages":
- return s__.ioprojectatomicpodmanInterface.DeleteUnusedImages(VarlinkCall{call})
+ return s__.ioprojectatomicpodmanInterface.HistoryImage(VarlinkCall{call}, in.Name)
- case "ImportImage":
+ case "RemoveImage":
var in struct {
- Source string `json:"source"`
- Reference string `json:"reference"`
- Message string `json:"message"`
- Changes []string `json:"changes"`
+ Name string `json:"name"`
+ Force bool `json:"force"`
}
err := call.GetParameters(&in)
if err != nil {
return call.ReplyInvalidParameter("parameters")
}
- return s__.ioprojectatomicpodmanInterface.ImportImage(VarlinkCall{call}, in.Source, in.Reference, in.Message, []string(in.Changes))
+ return s__.ioprojectatomicpodmanInterface.RemoveImage(VarlinkCall{call}, in.Name, in.Force)
- case "ListContainers":
- return s__.ioprojectatomicpodmanInterface.ListContainers(VarlinkCall{call})
+ case "GetVersion":
+ return s__.ioprojectatomicpodmanInterface.GetVersion(VarlinkCall{call})
- case "InspectContainer":
- return s__.ioprojectatomicpodmanInterface.InspectContainer(VarlinkCall{call})
+ case "ListContainerProcesses":
+ return s__.ioprojectatomicpodmanInterface.ListContainerProcesses(VarlinkCall{call})
- case "ListContainerChanges":
- return s__.ioprojectatomicpodmanInterface.ListContainerChanges(VarlinkCall{call})
+ case "GetContainerLogs":
+ return s__.ioprojectatomicpodmanInterface.GetContainerLogs(VarlinkCall{call})
- case "GetContainerStats":
- return s__.ioprojectatomicpodmanInterface.GetContainerStats(VarlinkCall{call})
+ case "DeleteStoppedContainers":
+ return s__.ioprojectatomicpodmanInterface.DeleteStoppedContainers(VarlinkCall{call})
- case "InspectImage":
- var in struct {
- Name string `json:"name"`
- }
- err := call.GetParameters(&in)
- if err != nil {
- return call.ReplyInvalidParameter("parameters")
- }
- return s__.ioprojectatomicpodmanInterface.InspectImage(VarlinkCall{call}, in.Name)
+ case "ListContainers":
+ return s__.ioprojectatomicpodmanInterface.ListContainers(VarlinkCall{call})
+
+ case "ResizeContainerTty":
+ return s__.ioprojectatomicpodmanInterface.ResizeContainerTty(VarlinkCall{call})
+
+ case "StartContainer":
+ return s__.ioprojectatomicpodmanInterface.StartContainer(VarlinkCall{call})
default:
return call.ReplyMethodNotFound(methodname)
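
Editor's note: on the wire, the more__ flag requests a streamed reply, and each Read call reports whether further replies follow; on the service side the dispatcher above decodes the name parameter before forwarding. A sketch of draining a streamed HistoryImage call, assuming an already-connected *varlink.Connection:

func exampleHistory(conn *varlink.Connection, name string) ([]ImageHistory, error) {
	// more=true requests a streamed reply; oneway=false means we expect answers.
	if err := HistoryImage(conn, true, false, name); err != nil {
		return nil, err
	}
	var all []ImageHistory
	for {
		var batch []ImageHistory
		continues, err := ReadHistoryImage_(conn, &batch)
		if err != nil {
			return nil, err
		}
		all = append(all, batch...)
		if !continues {
			break
		}
	}
	return all, nil
}
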
diff --git a/libpod/buildah/buildah.go b/libpod/buildah/buildah.go
deleted file mode 100644
index 8f4b95ac8..000000000
--- a/libpod/buildah/buildah.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package buildah
-
-import (
- "context"
- "encoding/json"
- "path/filepath"
-
- is "github.com/containers/image/storage"
- "github.com/containers/image/types"
- "github.com/containers/storage"
- "github.com/containers/storage/pkg/ioutils"
- "github.com/opencontainers/go-digest"
- "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/projectatomic/libpod/cmd/podman/docker"
-)
-
-const (
- // Package is the name of this package, used in help output and to
- // identify working containers.
- Package = "buildah"
- // Version for the Package. Bump version in contrib/rpm/buildah.spec
- // too.
- Version = "0.15"
- // The value we use to identify what type of information, currently a
- // serialized Builder structure, we are using as per-container state.
- // This should only be changed when we make incompatible changes to
- // that data structure, as it's used to distinguish containers which
- // are "ours" from ones that aren't.
- containerType = Package + " 0.0.1"
- // The file in the per-container directory which we use to store our
- // per-container state. If it isn't there, then the container isn't
- // one of our build containers.
- stateFile = Package + ".json"
-)
-
-// Builder objects are used to represent containers which are being used to
-// build images. They also carry potential updates which will be applied to
-// the image's configuration when the container's contents are used to build an
-// image.
-type Builder struct {
- store storage.Store
-
- // Type is used to help identify a build container's metadata. It
- // should not be modified.
- Type string `json:"type"`
- // FromImage is the name of the source image which was used to create
- // the container, if one was used. It should not be modified.
- FromImage string `json:"image,omitempty"`
- // FromImageID is the ID of the source image which was used to create
- // the container, if one was used. It should not be modified.
- FromImageID string `json:"image-id"`
- // Config is the source image's configuration. It should not be
- // modified.
- Config []byte `json:"config,omitempty"`
- // Manifest is the source image's manifest. It should not be modified.
- Manifest []byte `json:"manifest,omitempty"`
-
- // Container is the name of the build container. It should not be modified.
- Container string `json:"container-name,omitempty"`
- // ContainerID is the ID of the build container. It should not be modified.
- ContainerID string `json:"container-id,omitempty"`
- // MountPoint is the last location where the container's root
- // filesystem was mounted. It should not be modified.
- MountPoint string `json:"mountpoint,omitempty"`
- // ProcessLabel is the SELinux process label associated with the container
- ProcessLabel string `json:"process-label,omitempty"`
- // MountLabel is the SELinux mount label associated with the container
- MountLabel string `json:"mount-label,omitempty"`
-
- // ImageAnnotations is a set of key-value pairs which is stored in the
- // image's manifest.
- ImageAnnotations map[string]string `json:"annotations,omitempty"`
- // ImageCreatedBy is a description of how this container was built.
- ImageCreatedBy string `json:"created-by,omitempty"`
-
- // Image metadata and runtime settings, in multiple formats.
- OCIv1 v1.Image `json:"ociv1,omitempty"`
- Docker docker.V2Image `json:"docker,omitempty"`
- // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
- DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
- CommonBuildOpts *CommonBuildOptions
-}
-
-// CommonBuildOptions are resources that can be defined by flags for both buildah from and bud
-type CommonBuildOptions struct {
- // AddHost is the list of hostnames to add to the resolv.conf
- AddHost []string
-	//CgroupParent is the path to cgroups under which the cgroup for the container will be created.
- CgroupParent string
- //CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
- CPUPeriod uint64
- //CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
- CPUQuota int64
-	//CPUShares (relative weight)
- CPUShares uint64
- //CPUSetCPUs in which to allow execution (0-3, 0,1)
- CPUSetCPUs string
- //CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
- CPUSetMems string
- //Memory limit
- Memory int64
- //MemorySwap limit value equal to memory plus swap.
- MemorySwap int64
-	//SecurityOpts modify the way container security is running
- LabelOpts []string
- SeccompProfilePath string
- ApparmorProfile string
- //ShmSize is the shared memory size
- ShmSize string
- //Ulimit options
- Ulimit []string
- //Volumes to bind mount into the container
- Volumes []string
-}
-
-// ImportOptions are used to initialize a Builder from an existing container
-// which was created elsewhere.
-type ImportOptions struct {
- // Container is the name of the build container.
- Container string
- // SignaturePolicyPath specifies an override location for the signature
- // policy which should be used for verifying the new image as it is
- // being written. Except in specific circumstances, no value should be
- // specified, indicating that the shared, system-wide default policy
- // should be used.
- SignaturePolicyPath string
-}
-
-// ImportBuilder creates a new build configuration using an already-present
-// container.
-func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
- return importBuilder(ctx, store, options)
-}
-
-func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
- if options.Container == "" {
- return nil, errors.Errorf("container name must be specified")
- }
-
- c, err := store.Container(options.Container)
- if err != nil {
- return nil, err
- }
-
- systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
-
- builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
- if err != nil {
- return nil, err
- }
-
- if builder.FromImageID != "" {
- if d, err2 := digest.Parse(builder.FromImageID); err2 == nil {
- builder.Docker.Parent = docker.ID(d)
- } else {
- builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID))
- }
- }
- if builder.FromImage != "" {
- builder.Docker.ContainerConfig.Image = builder.FromImage
- }
-
- err = builder.Save()
- if err != nil {
- return nil, errors.Wrapf(err, "error saving builder state")
- }
-
- return builder, nil
-}
-
-func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
- manifest := []byte{}
- config := []byte{}
- imageName := ""
-
- if imageID != "" {
- ref, err := is.Transport.ParseStoreReference(store, imageID)
- if err != nil {
- return nil, errors.Wrapf(err, "no such image %q", imageID)
- }
- src, err2 := ref.NewImage(ctx, systemContext)
- if err2 != nil {
- return nil, errors.Wrapf(err2, "error instantiating image")
- }
- defer src.Close()
- config, err = src.ConfigBlob(ctx)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading image configuration")
- }
- manifest, _, err = src.Manifest(ctx)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading image manifest")
- }
- if img, err3 := store.Image(imageID); err3 == nil {
- if len(img.Names) > 0 {
- imageName = img.Names[0]
- }
- }
- }
-
- builder := &Builder{
- store: store,
- Type: containerType,
- FromImage: imageName,
- FromImageID: imageID,
- Config: config,
- Manifest: manifest,
- Container: containerName,
- ContainerID: containerID,
- ImageAnnotations: map[string]string{},
- ImageCreatedBy: "",
- }
-
- builder.initConfig()
-
- return builder, nil
-}
-
-// Save saves the builder's current state to the build container's metadata.
-// This should not need to be called directly, as other methods of the Builder
-// object take care of saving their state.
-func (b *Builder) Save() error {
- buildstate, err := json.Marshal(b)
- if err != nil {
- return err
- }
- cdir, err := b.store.ContainerDirectory(b.ContainerID)
- if err != nil {
- return err
- }
- return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600)
-}
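
Editor's note: Save above is the write half of the builder's persistence, marshaling the struct to JSON and writing it atomically to <container dir>/buildah.json. The matching read half is not shown in this file; a sketch of what it would look like, assuming io/ioutil is also imported (loadBuilder is a hypothetical name, not part of the package):

func loadBuilder(store storage.Store, containerID string) (*Builder, error) {
	cdir, err := store.ContainerDirectory(containerID)
	if err != nil {
		return nil, err
	}
	buf, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
	if err != nil {
		return nil, err
	}
	b := &Builder{}
	if err := json.Unmarshal(buf, b); err != nil {
		return nil, err
	}
	b.store = store // the store handle is not part of the serialized state
	return b, nil
}
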
diff --git a/libpod/buildah/util.go b/libpod/buildah/util.go
deleted file mode 100644
index 96f9ebf86..000000000
--- a/libpod/buildah/util.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package buildah
-
-import (
- "github.com/containers/image/docker/reference"
- "github.com/containers/storage"
- "github.com/containers/storage/pkg/reexec"
- "github.com/pkg/errors"
-)
-
-// InitReexec is a wrapper for reexec.Init(). It should be called at
-// the start of main(), and if it returns true, main() should return
-// immediately.
-func InitReexec() bool {
- return reexec.Init()
-}
-
-func copyStringStringMap(m map[string]string) map[string]string {
- n := map[string]string{}
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func copyStringSlice(s []string) []string {
- t := make([]string, len(s))
- copy(t, s)
- return t
-}
-
-// AddImageNames adds the specified names to the specified image.
-func AddImageNames(store storage.Store, image *storage.Image, addNames []string) error {
- names, err := ExpandNames(addNames)
- if err != nil {
- return err
- }
- err = store.SetNames(image.ID, append(image.Names, names...))
- if err != nil {
- return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
- }
- return nil
-}
-
-// ExpandNames takes unqualified names, parses them as image names, and returns
-// the fully expanded result, including a tag. Names which don't include a registry
-// name will be marked for the most-preferred registry (i.e., the first one in our
-// configuration).
-func ExpandNames(names []string) ([]string, error) {
- expanded := make([]string, 0, len(names))
- for _, n := range names {
- name, err := reference.ParseNormalizedNamed(n)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing name %q", n)
- }
- name = reference.TagNameOnly(name)
- tag := ""
- digest := ""
- if tagged, ok := name.(reference.NamedTagged); ok {
- tag = ":" + tagged.Tag()
- }
- if digested, ok := name.(reference.Digested); ok {
- digest = "@" + digested.Digest().String()
- }
- expanded = append(expanded, name.Name()+tag+digest)
- }
- return expanded, nil
-}
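
Editor's note: concretely, ExpandNames gives unqualified names the same treatment a docker pull would. Assuming docker.io is the most-preferred registry, a bare name gains the registry, the library namespace, and a :latest tag:

names, err := ExpandNames([]string{"alpine", "quay.io/me/img:v1"})
if err != nil {
	// handle the parse error
}
// names[0] == "docker.io/library/alpine:latest"
// names[1] == "quay.io/me/img:v1" (already fully qualified, kept as-is)
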
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index e136f96a4..a227e0987 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -2,11 +2,12 @@ package libpod
import (
"context"
+ "fmt"
"strings"
is "github.com/containers/image/storage"
"github.com/pkg/errors"
- "github.com/projectatomic/libpod/libpod/buildah"
+ "github.com/projectatomic/buildah"
"github.com/projectatomic/libpod/libpod/image"
"github.com/sirupsen/logrus"
)
@@ -52,9 +53,10 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
SignaturePolicyPath: options.SignaturePolicyPath,
}
commitOptions := buildah.CommitOptions{
- SignaturePolicyPath: options.SignaturePolicyPath,
- ReportWriter: options.ReportWriter,
- SystemContext: sc,
+ SignaturePolicyPath: options.SignaturePolicyPath,
+ ReportWriter: options.ReportWriter,
+ SystemContext: sc,
+ PreferredManifestType: options.PreferredManifestType,
}
importBuilder, err := buildah.ImportBuilder(ctx, c.runtime.store, builderOptions)
if err != nil {
@@ -68,6 +70,38 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
importBuilder.SetComment(options.Message)
}
+	// We need to take the metadata we find in the current container and
+	// add it to the resulting image.
+
+ // Entrypoint - always set this first or cmd will get wiped out
+ importBuilder.SetEntrypoint(c.Spec().Process.Args)
+ // Cmd
+ // We cannot differentiate between cmd and entrypoint here
+ // so we assign args to both
+ importBuilder.SetCmd(c.Spec().Process.Args)
+ // Env
+ for _, e := range c.config.Spec.Process.Env {
+		// SplitN keeps env values that themselves contain "=" intact
+		splitEnv := strings.SplitN(e, "=", 2)
+ importBuilder.SetEnv(splitEnv[0], splitEnv[1])
+ }
+ // Expose ports
+ for _, p := range c.config.PortMappings {
+		// string(int) would produce a rune; format the port as decimal text
+		importBuilder.SetPort(fmt.Sprintf("%d", p.ContainerPort))
+ }
+ // Labels
+ for k, v := range c.Labels() {
+ importBuilder.SetLabel(k, v)
+ }
+	// Stop signal is skipped; the builder has no setter for it
+ // User
+ importBuilder.SetUser(c.User())
+ // Volumes
+ for _, v := range c.config.Spec.Mounts {
+ importBuilder.AddVolume(v.Source)
+ }
+ // Workdir
+ importBuilder.SetWorkDir(c.Spec().Process.Cwd)
+
// Process user changes
for _, change := range options.Changes {
splitChange := strings.Split(change, "=")
@@ -77,16 +111,20 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
case "ENTRYPOINT":
importBuilder.SetEntrypoint(splitChange[1:])
case "ENV":
+ importBuilder.ClearEnv()
importBuilder.SetEnv(splitChange[1], splitChange[2])
case "EXPOSE":
+ importBuilder.ClearPorts()
importBuilder.SetPort(splitChange[1])
case "LABEL":
+ importBuilder.ClearLabels()
importBuilder.SetLabel(splitChange[1], splitChange[2])
case "STOPSIGNAL":
			// the builder has no stop-signal setter, so STOPSIGNAL is a no-op
case "USER":
importBuilder.SetUser(splitChange[1])
case "VOLUME":
+ importBuilder.ClearVolumes()
importBuilder.AddVolume(splitChange[1])
case "WORKDIR":
importBuilder.SetWorkDir(splitChange[1])
@@ -96,9 +134,9 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
if err != nil {
return nil, err
}
-
if err = importBuilder.Commit(ctx, imageRef, commitOptions); err != nil {
return nil, err
}
return c.runtime.imageRuntime.NewFromLocal(imageRef.DockerReference().String())
}
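
Editor's note: the change-processing switch above splits each --change directive on "=" and branches on the first token, so two-part directives (USER, WORKDIR) and three-part ones (ENV, LABEL) share one code path. The expected input shapes, with values purely illustrative:

changes := []string{
	"ENTRYPOINT=/bin/sh", // splitChange[1:] -> SetEntrypoint([]string{"/bin/sh"})
	"ENV=FOO=bar",        // splitChange[1], splitChange[2] -> SetEnv("FOO", "bar")
	"EXPOSE=8080",        // splitChange[1] -> SetPort("8080")
	"WORKDIR=/srv",       // splitChange[1] -> SetWorkDir("/srv")
}
_ = changes // passed to Commit via ContainerCommitOptions.Changes
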
diff --git a/libpod/image/image.go b/libpod/image/image.go
index b2dd22b82..db0fdab90 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -12,6 +12,7 @@ import (
types2 "github.com/containernetworking/cni/pkg/types"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports/alltransports"
@@ -21,6 +22,7 @@ import (
"github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/projectatomic/buildah"
"github.com/projectatomic/libpod/libpod/common"
"github.com/projectatomic/libpod/libpod/driver"
"github.com/projectatomic/libpod/pkg/inspect"
@@ -608,6 +610,7 @@ func (i *Image) ociv1Image(ctx context.Context) (*ociv1.Image, error) {
if err != nil {
return nil, err
}
+
return imgRef.OCIConfig(ctx)
}
@@ -660,11 +663,20 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
return nil, err
}
+ _, manifestType, err := i.Manifest(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to determine manifest type")
+ }
+ comment, err := i.Comment(ctx, manifestType)
+ if err != nil {
+ return nil, err
+ }
+
data := &inspect.ImageData{
ID: i.ID(),
RepoTags: i.Names(),
RepoDigests: repoDigests,
- Comment: ociv1Img.History[0].Comment,
+ Comment: comment,
Created: ociv1Img.Created,
Author: ociv1Img.Author,
Architecture: ociv1Img.Architecture,
@@ -680,7 +692,8 @@ func (i *Image) Inspect(ctx context.Context) (*inspect.ImageData, error) {
Type: ociv1Img.RootFS.Type,
Layers: ociv1Img.RootFS.DiffIDs,
},
- GraphDriver: driver,
+ GraphDriver: driver,
+ ManifestType: manifestType,
}
return data, nil
}
@@ -802,3 +815,27 @@ func (i *Image) Containers() ([]string, error) {
}
return imageContainers, err
}
+
+// Comment returns the Comment for an image depending on its ManifestType
+func (i *Image) Comment(ctx context.Context, manifestType string) (string, error) {
+ if manifestType == buildah.Dockerv2ImageManifest {
+ imgRef, err := i.toImageRef(ctx)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to create image reference from image")
+ }
+ blob, err := imgRef.ConfigBlob(ctx)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to get config blob from image")
+ }
+ b := manifest.Schema2Image{}
+ if err := json.Unmarshal(blob, &b); err != nil {
+ return "", err
+ }
+ return b.Comment, nil
+ }
+ ociv1Img, err := i.ociv1Image(ctx)
+ if err != nil {
+ return "", err
+ }
+ return ociv1Img.History[0].Comment, nil
+}
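
Editor's note: callers see the result through Inspect, where ManifestType records which branch Comment took, the schema2 config blob for docker-format images or History[0].Comment otherwise. A short sketch, assuming img is an *Image already in scope:

data, err := img.Inspect(ctx)
if err != nil {
	return err
}
if data.ManifestType == buildah.Dockerv2ImageManifest {
	// docker-format image: Comment was read from the schema2 config blob
}
fmt.Println(data.Comment)
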
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index 041de0cc2..2392c41d4 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "context"
"fmt"
"io"
@@ -13,6 +14,7 @@ import (
"github.com/containers/storage/pkg/archive"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/projectatomic/buildah/imagebuildah"
"github.com/projectatomic/libpod/libpod/common"
"github.com/projectatomic/libpod/libpod/image"
)
@@ -177,3 +179,8 @@ func removeStorageContainers(ctrIDs []string, store storage.Store) error {
}
return nil
}
+
+// Build wraps imagebuildah.BuildDockerfiles, supplying the runtime's image store
+func (r *Runtime) Build(ctx context.Context, options imagebuildah.BuildOptions, dockerfiles ...string) error {
+ return imagebuildah.BuildDockerfiles(ctx, r.store, options, dockerfiles...)
+}
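
Editor's note: a sketch of driving the new wrapper from a caller that already holds a *Runtime. The BuildOptions field names below (ContextDirectory, Output) mirror what podman build passes in this commit's cmd/podman/build.go, but treat them as assumptions here:

opts := imagebuildah.BuildOptions{
	ContextDirectory: ".",              // assumed field: directory sent as build context
	Output:           "localhost/demo", // assumed field: name for the built image
}
if err := runtime.Build(ctx, opts, "Dockerfile"); err != nil {
	// handle the build failure
}
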
diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go
index b2dd1e1c0..9d4444c89 100644
--- a/pkg/inspect/inspect.go
+++ b/pkg/inspect/inspect.go
@@ -125,6 +125,7 @@ type ImageData struct {
RootFS *RootFS `json:"RootFS"`
Labels map[string]string `json:"Labels"`
Annotations map[string]string `json:"Annotations"`
+ ManifestType string `json:"ManifestType"`
}
// RootFS holds the root fs information of an image
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index 154a83bd2..30f208f6f 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -48,7 +48,7 @@ var _ = Describe("Podman commit", func() {
Expect(ec).To(Equal(0))
Expect(podmanTest.NumberOfContainers()).To(Equal(1))
- session := podmanTest.Podman([]string{"commit", "--message", "testing-commit", "test1", "foobar.com/test1-image:latest"})
+ session := podmanTest.Podman([]string{"commit", "-f", "docker", "--message", "testing-commit", "test1", "foobar.com/test1-image:latest"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/vendor.conf b/vendor.conf
index 487431232..bf7293267 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -17,7 +17,7 @@ github.com/cri-o/ocicni master
github.com/cyphar/filepath-securejoin v0.2.1
github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution 7a8efe719e55bbfaff7bc5718cdf0ed51ca821df
-github.com/docker/docker d4f6db83c21cfc6af54fffb1f13e8acb7199f96a
+github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-units v0.3.2
@@ -88,3 +88,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master https://github.com/varlink/go
+github.com/projectatomic/buildah master https://github.com/projectatomic/buildah
+github.com/Nvveen/Gotty master
+github.com/fsouza/go-dockerclient master
+github.com/openshift/imagebuilder master
diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE
new file mode 100644
index 000000000..0b71c9736
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of the FreeBSD Project.
diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README
new file mode 100644
index 000000000..a6b0d9a8f
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/README
@@ -0,0 +1,5 @@
+Gotty is a library written in Go that determines and reads terminfo database
+files to produce an interface for interacting with the capabilities of a
+terminal.
+See the godoc documentation or the source code for more information about
+function usage.
diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go
new file mode 100644
index 000000000..a4c005fae
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/attributes.go
@@ -0,0 +1,514 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package gotty
+
+// Boolean capabilities
+var BoolAttr = [...]string{
+ "auto_left_margin", "bw",
+ "auto_right_margin", "am",
+ "no_esc_ctlc", "xsb",
+ "ceol_standout_glitch", "xhp",
+ "eat_newline_glitch", "xenl",
+ "erase_overstrike", "eo",
+ "generic_type", "gn",
+ "hard_copy", "hc",
+ "has_meta_key", "km",
+ "has_status_line", "hs",
+ "insert_null_glitch", "in",
+ "memory_above", "da",
+ "memory_below", "db",
+ "move_insert_mode", "mir",
+ "move_standout_mode", "msgr",
+ "over_strike", "os",
+ "status_line_esc_ok", "eslok",
+ "dest_tabs_magic_smso", "xt",
+ "tilde_glitch", "hz",
+ "transparent_underline", "ul",
+ "xon_xoff", "nxon",
+ "needs_xon_xoff", "nxon",
+ "prtr_silent", "mc5i",
+ "hard_cursor", "chts",
+ "non_rev_rmcup", "nrrmc",
+ "no_pad_char", "npc",
+ "non_dest_scroll_region", "ndscr",
+ "can_change", "ccc",
+ "back_color_erase", "bce",
+ "hue_lightness_saturation", "hls",
+ "col_addr_glitch", "xhpa",
+ "cr_cancels_micro_mode", "crxm",
+ "has_print_wheel", "daisy",
+ "row_addr_glitch", "xvpa",
+ "semi_auto_right_margin", "sam",
+ "cpi_changes_res", "cpix",
+ "lpi_changes_res", "lpix",
+ "backspaces_with_bs", "",
+ "crt_no_scrolling", "",
+ "no_correctly_working_cr", "",
+ "gnu_has_meta_key", "",
+ "linefeed_is_newline", "",
+ "has_hardware_tabs", "",
+ "return_does_clr_eol", "",
+}
+
+// Numerical capabilities
+var NumAttr = [...]string{
+ "columns", "cols",
+ "init_tabs", "it",
+ "lines", "lines",
+ "lines_of_memory", "lm",
+ "magic_cookie_glitch", "xmc",
+ "padding_baud_rate", "pb",
+ "virtual_terminal", "vt",
+ "width_status_line", "wsl",
+ "num_labels", "nlab",
+ "label_height", "lh",
+ "label_width", "lw",
+ "max_attributes", "ma",
+ "maximum_windows", "wnum",
+ "max_colors", "colors",
+ "max_pairs", "pairs",
+ "no_color_video", "ncv",
+ "buffer_capacity", "bufsz",
+ "dot_vert_spacing", "spinv",
+ "dot_horz_spacing", "spinh",
+ "max_micro_address", "maddr",
+ "max_micro_jump", "mjump",
+ "micro_col_size", "mcs",
+ "micro_line_size", "mls",
+ "number_of_pins", "npins",
+ "output_res_char", "orc",
+ "output_res_line", "orl",
+ "output_res_horz_inch", "orhi",
+ "output_res_vert_inch", "orvi",
+ "print_rate", "cps",
+ "wide_char_size", "widcs",
+ "buttons", "btns",
+ "bit_image_entwining", "bitwin",
+ "bit_image_type", "bitype",
+ "magic_cookie_glitch_ul", "",
+ "carriage_return_delay", "",
+ "new_line_delay", "",
+ "backspace_delay", "",
+ "horizontal_tab_delay", "",
+ "number_of_function_keys", "",
+}
+
+// String capabilities
+var StrAttr = [...]string{
+ "back_tab", "cbt",
+ "bell", "bel",
+ "carriage_return", "cr",
+ "change_scroll_region", "csr",
+ "clear_all_tabs", "tbc",
+ "clear_screen", "clear",
+ "clr_eol", "el",
+ "clr_eos", "ed",
+ "column_address", "hpa",
+ "command_character", "cmdch",
+ "cursor_address", "cup",
+ "cursor_down", "cud1",
+ "cursor_home", "home",
+ "cursor_invisible", "civis",
+ "cursor_left", "cub1",
+ "cursor_mem_address", "mrcup",
+ "cursor_normal", "cnorm",
+ "cursor_right", "cuf1",
+ "cursor_to_ll", "ll",
+ "cursor_up", "cuu1",
+ "cursor_visible", "cvvis",
+ "delete_character", "dch1",
+ "delete_line", "dl1",
+ "dis_status_line", "dsl",
+ "down_half_line", "hd",
+ "enter_alt_charset_mode", "smacs",
+ "enter_blink_mode", "blink",
+ "enter_bold_mode", "bold",
+ "enter_ca_mode", "smcup",
+ "enter_delete_mode", "smdc",
+ "enter_dim_mode", "dim",
+ "enter_insert_mode", "smir",
+ "enter_secure_mode", "invis",
+ "enter_protected_mode", "prot",
+ "enter_reverse_mode", "rev",
+ "enter_standout_mode", "smso",
+ "enter_underline_mode", "smul",
+ "erase_chars", "ech",
+ "exit_alt_charset_mode", "rmacs",
+ "exit_attribute_mode", "sgr0",
+ "exit_ca_mode", "rmcup",
+ "exit_delete_mode", "rmdc",
+ "exit_insert_mode", "rmir",
+ "exit_standout_mode", "rmso",
+ "exit_underline_mode", "rmul",
+ "flash_screen", "flash",
+ "form_feed", "ff",
+ "from_status_line", "fsl",
+ "init_1string", "is1",
+ "init_2string", "is2",
+ "init_3string", "is3",
+ "init_file", "if",
+ "insert_character", "ich1",
+ "insert_line", "il1",
+ "insert_padding", "ip",
+ "key_backspace", "kbs",
+ "key_catab", "ktbc",
+ "key_clear", "kclr",
+ "key_ctab", "kctab",
+ "key_dc", "kdch1",
+ "key_dl", "kdl1",
+ "key_down", "kcud1",
+ "key_eic", "krmir",
+ "key_eol", "kel",
+ "key_eos", "ked",
+ "key_f0", "kf0",
+ "key_f1", "kf1",
+ "key_f10", "kf10",
+ "key_f2", "kf2",
+ "key_f3", "kf3",
+ "key_f4", "kf4",
+ "key_f5", "kf5",
+ "key_f6", "kf6",
+ "key_f7", "kf7",
+ "key_f8", "kf8",
+ "key_f9", "kf9",
+ "key_home", "khome",
+ "key_ic", "kich1",
+ "key_il", "kil1",
+ "key_left", "kcub1",
+ "key_ll", "kll",
+ "key_npage", "knp",
+ "key_ppage", "kpp",
+ "key_right", "kcuf1",
+ "key_sf", "kind",
+ "key_sr", "kri",
+ "key_stab", "khts",
+ "key_up", "kcuu1",
+ "keypad_local", "rmkx",
+ "keypad_xmit", "smkx",
+ "lab_f0", "lf0",
+ "lab_f1", "lf1",
+ "lab_f10", "lf10",
+ "lab_f2", "lf2",
+ "lab_f3", "lf3",
+ "lab_f4", "lf4",
+ "lab_f5", "lf5",
+ "lab_f6", "lf6",
+ "lab_f7", "lf7",
+ "lab_f8", "lf8",
+ "lab_f9", "lf9",
+ "meta_off", "rmm",
+ "meta_on", "smm",
+ "newline", "_glitch",
+ "pad_char", "npc",
+ "parm_dch", "dch",
+ "parm_delete_line", "dl",
+ "parm_down_cursor", "cud",
+ "parm_ich", "ich",
+ "parm_index", "indn",
+ "parm_insert_line", "il",
+ "parm_left_cursor", "cub",
+ "parm_right_cursor", "cuf",
+ "parm_rindex", "rin",
+ "parm_up_cursor", "cuu",
+ "pkey_key", "pfkey",
+ "pkey_local", "pfloc",
+ "pkey_xmit", "pfx",
+ "print_screen", "mc0",
+ "prtr_off", "mc4",
+ "prtr_on", "mc5",
+ "repeat_char", "rep",
+ "reset_1string", "rs1",
+ "reset_2string", "rs2",
+ "reset_3string", "rs3",
+ "reset_file", "rf",
+ "restore_cursor", "rc",
+ "row_address", "mvpa",
+ "save_cursor", "row_address",
+ "scroll_forward", "ind",
+ "scroll_reverse", "ri",
+ "set_attributes", "sgr",
+ "set_tab", "hts",
+ "set_window", "wind",
+ "tab", "s_magic_smso",
+ "to_status_line", "tsl",
+ "underline_char", "uc",
+ "up_half_line", "hu",
+ "init_prog", "iprog",
+ "key_a1", "ka1",
+ "key_a3", "ka3",
+ "key_b2", "kb2",
+ "key_c1", "kc1",
+ "key_c3", "kc3",
+ "prtr_non", "mc5p",
+ "char_padding", "rmp",
+ "acs_chars", "acsc",
+ "plab_norm", "pln",
+ "key_btab", "kcbt",
+ "enter_xon_mode", "smxon",
+ "exit_xon_mode", "rmxon",
+ "enter_am_mode", "smam",
+ "exit_am_mode", "rmam",
+ "xon_character", "xonc",
+ "xoff_character", "xoffc",
+ "ena_acs", "enacs",
+ "label_on", "smln",
+ "label_off", "rmln",
+ "key_beg", "kbeg",
+ "key_cancel", "kcan",
+ "key_close", "kclo",
+ "key_command", "kcmd",
+ "key_copy", "kcpy",
+ "key_create", "kcrt",
+ "key_end", "kend",
+ "key_enter", "kent",
+ "key_exit", "kext",
+ "key_find", "kfnd",
+ "key_help", "khlp",
+ "key_mark", "kmrk",
+ "key_message", "kmsg",
+ "key_move", "kmov",
+ "key_next", "knxt",
+ "key_open", "kopn",
+ "key_options", "kopt",
+ "key_previous", "kprv",
+ "key_print", "kprt",
+ "key_redo", "krdo",
+ "key_reference", "kref",
+ "key_refresh", "krfr",
+ "key_replace", "krpl",
+ "key_restart", "krst",
+ "key_resume", "kres",
+ "key_save", "ksav",
+ "key_suspend", "kspd",
+ "key_undo", "kund",
+ "key_sbeg", "kBEG",
+ "key_scancel", "kCAN",
+ "key_scommand", "kCMD",
+ "key_scopy", "kCPY",
+ "key_screate", "kCRT",
+ "key_sdc", "kDC",
+ "key_sdl", "kDL",
+ "key_select", "kslt",
+ "key_send", "kEND",
+ "key_seol", "kEOL",
+ "key_sexit", "kEXT",
+ "key_sfind", "kFND",
+ "key_shelp", "kHLP",
+ "key_shome", "kHOM",
+ "key_sic", "kIC",
+ "key_sleft", "kLFT",
+ "key_smessage", "kMSG",
+ "key_smove", "kMOV",
+ "key_snext", "kNXT",
+ "key_soptions", "kOPT",
+ "key_sprevious", "kPRV",
+ "key_sprint", "kPRT",
+ "key_sredo", "kRDO",
+ "key_sreplace", "kRPL",
+ "key_sright", "kRIT",
+ "key_srsume", "kRES",
+ "key_ssave", "kSAV",
+ "key_ssuspend", "kSPD",
+ "key_sundo", "kUND",
+ "req_for_input", "rfi",
+ "key_f11", "kf11",
+ "key_f12", "kf12",
+ "key_f13", "kf13",
+ "key_f14", "kf14",
+ "key_f15", "kf15",
+ "key_f16", "kf16",
+ "key_f17", "kf17",
+ "key_f18", "kf18",
+ "key_f19", "kf19",
+ "key_f20", "kf20",
+ "key_f21", "kf21",
+ "key_f22", "kf22",
+ "key_f23", "kf23",
+ "key_f24", "kf24",
+ "key_f25", "kf25",
+ "key_f26", "kf26",
+ "key_f27", "kf27",
+ "key_f28", "kf28",
+ "key_f29", "kf29",
+ "key_f30", "kf30",
+ "key_f31", "kf31",
+ "key_f32", "kf32",
+ "key_f33", "kf33",
+ "key_f34", "kf34",
+ "key_f35", "kf35",
+ "key_f36", "kf36",
+ "key_f37", "kf37",
+ "key_f38", "kf38",
+ "key_f39", "kf39",
+ "key_f40", "kf40",
+ "key_f41", "kf41",
+ "key_f42", "kf42",
+ "key_f43", "kf43",
+ "key_f44", "kf44",
+ "key_f45", "kf45",
+ "key_f46", "kf46",
+ "key_f47", "kf47",
+ "key_f48", "kf48",
+ "key_f49", "kf49",
+ "key_f50", "kf50",
+ "key_f51", "kf51",
+ "key_f52", "kf52",
+ "key_f53", "kf53",
+ "key_f54", "kf54",
+ "key_f55", "kf55",
+ "key_f56", "kf56",
+ "key_f57", "kf57",
+ "key_f58", "kf58",
+ "key_f59", "kf59",
+ "key_f60", "kf60",
+ "key_f61", "kf61",
+ "key_f62", "kf62",
+ "key_f63", "kf63",
+ "clr_bol", "el1",
+ "clear_margins", "mgc",
+ "set_left_margin", "smgl",
+ "set_right_margin", "smgr",
+ "label_format", "fln",
+ "set_clock", "sclk",
+ "display_clock", "dclk",
+ "remove_clock", "rmclk",
+ "create_window", "cwin",
+ "goto_window", "wingo",
+ "hangup", "hup",
+ "dial_phone", "dial",
+ "quick_dial", "qdial",
+ "tone", "tone",
+ "pulse", "pulse",
+ "flash_hook", "hook",
+ "fixed_pause", "pause",
+ "wait_tone", "wait",
+ "user0", "u0",
+ "user1", "u1",
+ "user2", "u2",
+ "user3", "u3",
+ "user4", "u4",
+ "user5", "u5",
+ "user6", "u6",
+ "user7", "u7",
+ "user8", "u8",
+ "user9", "u9",
+ "orig_pair", "op",
+ "orig_colors", "oc",
+ "initialize_color", "initc",
+ "initialize_pair", "initp",
+ "set_color_pair", "scp",
+ "set_foreground", "setf",
+ "set_background", "setb",
+ "change_char_pitch", "cpi",
+ "change_line_pitch", "lpi",
+ "change_res_horz", "chr",
+ "change_res_vert", "cvr",
+ "define_char", "defc",
+ "enter_doublewide_mode", "swidm",
+ "enter_draft_quality", "sdrfq",
+ "enter_italics_mode", "sitm",
+ "enter_leftward_mode", "slm",
+ "enter_micro_mode", "smicm",
+ "enter_near_letter_quality", "snlq",
+ "enter_normal_quality", "snrmq",
+ "enter_shadow_mode", "sshm",
+ "enter_subscript_mode", "ssubm",
+ "enter_superscript_mode", "ssupm",
+ "enter_upward_mode", "sum",
+ "exit_doublewide_mode", "rwidm",
+ "exit_italics_mode", "ritm",
+ "exit_leftward_mode", "rlm",
+ "exit_micro_mode", "rmicm",
+ "exit_shadow_mode", "rshm",
+ "exit_subscript_mode", "rsubm",
+ "exit_superscript_mode", "rsupm",
+ "exit_upward_mode", "rum",
+ "micro_column_address", "mhpa",
+ "micro_down", "mcud1",
+ "micro_left", "mcub1",
+ "micro_right", "mcuf1",
+ "micro_row_address", "mvpa",
+ "micro_up", "mcuu1",
+ "order_of_pins", "porder",
+ "parm_down_micro", "mcud",
+ "parm_left_micro", "mcub",
+ "parm_right_micro", "mcuf",
+ "parm_up_micro", "mcuu",
+ "select_char_set", "scs",
+ "set_bottom_margin", "smgb",
+ "set_bottom_margin_parm", "smgbp",
+ "set_left_margin_parm", "smglp",
+ "set_right_margin_parm", "smgrp",
+ "set_top_margin", "smgt",
+ "set_top_margin_parm", "smgtp",
+ "start_bit_image", "sbim",
+ "start_char_set_def", "scsd",
+ "stop_bit_image", "rbim",
+ "stop_char_set_def", "rcsd",
+ "subscript_characters", "subcs",
+ "superscript_characters", "supcs",
+ "these_cause_cr", "docr",
+ "zero_motion", "zerom",
+ "char_set_names", "csnm",
+ "key_mouse", "kmous",
+ "mouse_info", "minfo",
+ "req_mouse_pos", "reqmp",
+ "get_mouse", "getm",
+ "set_a_foreground", "setaf",
+ "set_a_background", "setab",
+ "pkey_plab", "pfxl",
+ "device_type", "devt",
+ "code_set_init", "csin",
+ "set0_des_seq", "s0ds",
+ "set1_des_seq", "s1ds",
+ "set2_des_seq", "s2ds",
+ "set3_des_seq", "s3ds",
+ "set_lr_margin", "smglr",
+ "set_tb_margin", "smgtb",
+ "bit_image_repeat", "birep",
+ "bit_image_newline", "binel",
+ "bit_image_carriage_return", "bicr",
+ "color_names", "colornm",
+ "define_bit_image_region", "defbi",
+ "end_bit_image_region", "endbi",
+ "set_color_band", "setcolor",
+ "set_page_length", "slines",
+ "display_pc_char", "dispc",
+ "enter_pc_charset_mode", "smpch",
+ "exit_pc_charset_mode", "rmpch",
+ "enter_scancode_mode", "smsc",
+ "exit_scancode_mode", "rmsc",
+ "pc_term_options", "pctrm",
+ "scancode_escape", "scesc",
+ "alt_scancode_esc", "scesa",
+ "enter_horizontal_hl_mode", "ehhlm",
+ "enter_left_hl_mode", "elhlm",
+ "enter_low_hl_mode", "elohlm",
+ "enter_right_hl_mode", "erhlm",
+ "enter_top_hl_mode", "ethlm",
+ "enter_vertical_hl_mode", "evhlm",
+ "set_a_attributes", "sgr1",
+ "set_pglen_inch", "slength",
+ "termcap_init2", "",
+ "termcap_reset", "",
+ "linefeed_if_not_lf", "",
+ "backspace_if_not_bs", "",
+ "other_non_function_keys", "",
+ "arrow_key_map", "",
+ "acs_ulcorner", "",
+ "acs_llcorner", "",
+ "acs_urcorner", "",
+ "acs_lrcorner", "",
+ "acs_ltee", "",
+ "acs_rtee", "",
+ "acs_btee", "",
+ "acs_ttee", "",
+ "acs_hline", "",
+ "acs_vline", "",
+ "acs_plus", "",
+ "memory_lock", "",
+ "memory_unlock", "",
+ "box_chars_1", "",
+}
diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go
new file mode 100644
index 000000000..093cbf37e
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/gotty.go
@@ -0,0 +1,238 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Gotty is a Go-package for reading and parsing the terminfo database
+package gotty
+
+// TODO add more concurrency to name lookup, look for more opportunities.
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// Open a terminfo file by the name given and construct a TermInfo object.
+// If something went wrong reading the terminfo database file, an error is
+// returned.
+func OpenTermInfo(termName string) (*TermInfo, error) {
+ var term *TermInfo
+ var err error
+ // Candidate directories: $TERMINFO first (if set), then the same
+ // locations ncurses searches.
+ var locations []string
+ if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 {
+ locations = append(locations, termloc+"/")
+ }
+ locations = append(locations, os.Getenv("HOME")+"/.terminfo/", "/etc/terminfo/",
+ "/lib/terminfo/", "/usr/share/terminfo/")
+ var path string
+ for _, str := range locations {
+ // Construct path; terminfo files live in a one-letter subdirectory
+ candidate := str + string(termName[0]) + "/" + termName
+ // Check if the candidate can be opened
+ if file, err := os.Open(candidate); err == nil {
+ // Path can open, fall out and use the current candidate
+ file.Close()
+ path = candidate
+ break
+ }
+ }
+ if len(path) > 0 {
+ term, err = readTermInfo(path)
+ } else {
+ err = errors.New("No terminfo file(-location) found")
+ }
+ return term, err
+}
+
+// Open a terminfo file from the environment variable containing the current
+// terminal name and construct a TermInfo object. If something went wrong
+// reading the terminfo database file, an error is returned.
+func OpenTermInfoEnv() (*TermInfo, error) {
+ termenv := os.Getenv("TERM")
+ return OpenTermInfo(termenv)
+}
+
+// Return an attribute by the name attr provided. If none can be found,
+// an error is returned.
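+//
+// Illustrative lookup (sketch, not part of the vendored source): the
+// attribute maps are keyed by the short termcap names, so
+//
+//   ti, _ := OpenTermInfoEnv()
+//   v, _ := ti.GetAttribute("cols") // int16 number of columns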
+func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
+ // Channel to store the main value in.
+ var value stacker
+ // Add a blocking WaitGroup
+ var block sync.WaitGroup
+ // Keep track of variable being written.
+ written := false
+ // Function to put into goroutine.
+ f := func(ats interface{}) {
+ var ok bool
+ var v stacker
+ // Switch on type of map to use and assign value to it.
+ switch reflect.TypeOf(ats).Elem().Kind() {
+ case reflect.Bool:
+ v, ok = ats.(map[string]bool)[attr]
+ case reflect.Int16:
+ v, ok = ats.(map[string]int16)[attr]
+ case reflect.String:
+ v, ok = ats.(map[string]string)[attr]
+ }
+ // If ok, a value is found, so we can write.
+ if ok {
+ value = v
+ written = true
+ }
+ // Goroutine is done
+ block.Done()
+ }
+ block.Add(3)
+ // Go for all 3 attribute lists.
+ go f(term.boolAttributes)
+ go f(term.numAttributes)
+ go f(term.strAttributes)
+ // Wait until every goroutine is done.
+ block.Wait()
+ // If a value has been written, return it.
+ if written {
+ return value, nil
+ }
+ // Otherwise, error.
+ return nil, fmt.Errorf("Erorr finding attribute")
+}
+
+// Return an attribute by the name attr provided. If none can be found,
+// an error is returned. A name is first converted to its termcap value.
+func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
+ tc := GetTermcapName(name)
+ return term.GetAttribute(tc)
+}
+
+// A utility function that finds and returns the termcap equivalent of a
+// variable name.
+func GetTermcapName(name string) string {
+ // Termcap name
+ var tc string
+ // Blocking group
+ var wait sync.WaitGroup
+ // Function to put into a goroutine
+ f := func(attrs []string) {
+ // Find the string corresponding to the name
+ for i, s := range attrs {
+ if s == name {
+ tc = attrs[i+1]
+ }
+ }
+ // Goroutine is finished
+ wait.Done()
+ }
+ wait.Add(3)
+ // Go for all 3 attribute lists
+ go f(BoolAttr[:])
+ go f(NumAttr[:])
+ go f(StrAttr[:])
+ // Wait until every goroutine is done
+ wait.Wait()
+ // Return the termcap name
+ return tc
+}
+
+// This function takes a path to a terminfo file and reads it in binary
+// form to construct the actual TermInfo file.
+func readTermInfo(path string) (*TermInfo, error) {
+ // Open the terminfo file
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
+ // Header is composed of the magic 0432 octal number, size of the name
+ // section, size of the boolean section, the amount of number values,
+ // the number of offsets of strings, and the size of the string section.
+ var header [6]int16
+ // Byte array is used to read in byte values
+ var byteArray []byte
+ // Short array is used to read in short values
+ var shArray []int16
+ // TermInfo object to store values
+ var term TermInfo
+
+ // Read in the header
+ err = binary.Read(file, binary.LittleEndian, &header)
+ if err != nil {
+ return nil, err
+ }
+ // If magic number isn't there or isn't correct, we have the wrong filetype
+ if header[0] != 0432 {
+ return nil, errors.New("Wrong filetype")
+ }
+
+ // Read in the names
+ byteArray = make([]byte, header[1])
+ err = binary.Read(file, binary.LittleEndian, &byteArray)
+ if err != nil {
+ return nil, err
+ }
+ term.Names = strings.Split(string(byteArray), "|")
+
+ // Read in the booleans
+ byteArray = make([]byte, header[2])
+ err = binary.Read(file, binary.LittleEndian, &byteArray)
+ if err != nil {
+ return nil, err
+ }
+ term.boolAttributes = make(map[string]bool)
+ for i, b := range byteArray {
+ if b == 1 {
+ term.boolAttributes[BoolAttr[i*2+1]] = true
+ }
+ }
+ // If the number of bytes read is not even, a byte for alignment is added
+ if len(byteArray)%2 != 0 {
+ err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Read in shorts
+ shArray = make([]int16, header[3])
+ err = binary.Read(file, binary.LittleEndian, &shArray)
+ if err != nil {
+ return nil, err
+ }
+ term.numAttributes = make(map[string]int16)
+ for i, n := range shArray {
+ if n != 0377 && n > -1 {
+ term.numAttributes[NumAttr[i*2+1]] = n
+ }
+ }
+
+ // Read the offsets into the short array
+ shArray = make([]int16, header[4])
+ err = binary.Read(file, binary.LittleEndian, &shArray)
+ if err != nil {
+ return nil, err
+ }
+ // Read the actual strings in the byte array
+ byteArray = make([]byte, header[5])
+ err = binary.Read(file, binary.LittleEndian, &byteArray)
+ if err != nil {
+ return nil, err
+ }
+ term.strAttributes = make(map[string]string)
+ // We get an offset, and then iterate until the string is null-terminated
+ for i, offset := range shArray {
+ if offset > -1 {
+ r := offset
+ for ; byteArray[r] != 0; r++ {
+ }
+ term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
+ }
+ }
+ return &term, nil
+}
diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go
new file mode 100644
index 000000000..a9d5d23c5
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/parser.go
@@ -0,0 +1,362 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package gotty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var exp = [...]string{
+ "%%",
+ "%c",
+ "%s",
+ "%p(\\d)",
+ "%P([A-z])",
+ "%g([A-z])",
+ "%'(.)'",
+ "%{([0-9]+)}",
+ "%l",
+ "%\\+|%-|%\\*|%/|%m",
+ "%&|%\\||%\\^",
+ "%=|%>|%<",
+ "%A|%O",
+ "%!|%~",
+ "%i",
+ "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
+ "%\\?(.*?);",
+}
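+
+// Each pattern above matches one class of terminfo parameterized-string
+// operator; e.g. "%p(\\d)" pushes a parameter and the trailing [doxXs]
+// pattern pops and prints a value, as handled case by case in walk below.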
+
+var regex *regexp.Regexp
+var staticVar map[byte]stacker
+
+// Parses the attribute that is received with name attr and parameters params.
+func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
+ // Get the attribute name first.
+ iface, err := term.GetAttribute(attr)
+ str, ok := iface.(string)
+ if err != nil {
+ return "", err
+ }
+ if !ok {
+ return str, errors.New("Only string capabilities can be parsed.")
+ }
+ // Construct the hidden parser struct so we can use a recursive stack based
+ // parser.
+ ps := &parser{}
+ // Dynamic variables only exist in this context.
+ ps.dynamicVar = make(map[byte]stacker, 26)
+ ps.parameters = make([]stacker, len(params))
+ // Convert the parameters to insert them into the parser struct.
+ for i, x := range params {
+ ps.parameters[i] = x
+ }
+ // Recursively walk and return.
+ result, err := ps.walk(str)
+ return result, err
+}
+
+// Parses the attribute that is received with name attr and parameters params.
+// Only works on full name of a capability that is given, which it uses to
+// search for the termcap name.
+func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
+ tc := GetTermcapName(attr)
+ return term.Parse(tc, params...)
+}
+
+// Identify each token in a stack based manner and do the actual parsing.
+func (ps *parser) walk(attr string) (string, error) {
+ // We use a buffer to get the modified string.
+ var buf bytes.Buffer
+ // Next, find and identify all tokens by their indices and strings.
+ tokens := regex.FindAllStringSubmatch(attr, -1)
+ if len(tokens) == 0 {
+ return attr, nil
+ }
+ indices := regex.FindAllStringIndex(attr, -1)
+ q := 0 // q counts the matches of one token
+ // Iterate through the string per character.
+ for i := 0; i < len(attr); i++ {
+ // If the current position is an identified token, execute the following
+ // steps.
+ if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
+ // Switch on token.
+ switch {
+ case tokens[q][0][:2] == "%%":
+ // Literal percentage character.
+ buf.WriteByte('%')
+ case tokens[q][0][:2] == "%c":
+ // Pop a character.
+ c, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ buf.WriteByte(c.(byte))
+ case tokens[q][0][:2] == "%s":
+ // Pop a string.
+ str, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ if _, ok := str.(string); !ok {
+ return buf.String(), errors.New("Stack head is not a string")
+ }
+ buf.WriteString(str.(string))
+ case tokens[q][0][:2] == "%p":
+ // Push a parameter on the stack.
+ index, err := strconv.ParseInt(tokens[q][1], 10, 8)
+ if err != nil {
+ return buf.String(), err
+ }
+ index--
+ if int(index) >= len(ps.parameters) {
+ return buf.String(), errors.New("Parameters index out of bound")
+ }
+ ps.st.push(ps.parameters[index])
+ case tokens[q][0][:2] == "%P":
+ // Pop a variable from the stack as a dynamic or static variable.
+ val, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ index := tokens[q][2]
+ if len(index) > 1 {
+ errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
+ index)
+ return buf.String(), errors.New(errorStr)
+ }
+ // Specify either dynamic or static.
+ if index[0] >= 'a' && index[0] <= 'z' {
+ ps.dynamicVar[index[0]] = val
+ } else if index[0] >= 'A' && index[0] <= 'Z' {
+ staticVar[index[0]] = val
+ }
+ case tokens[q][0][:2] == "%g":
+ // Push a variable from the stack as a dynamic or static variable.
+ index := tokens[q][3]
+ if len(index) > 1 {
+ errorStr := fmt.Sprintf("%s is not a valid static variables index",
+ index)
+ return buf.String(), errors.New(errorStr)
+ }
+ var val stacker
+ if index[0] >= 'a' && index[0] <= 'z' {
+ val = ps.dynamicVar[index[0]]
+ } else if index[0] >= 'A' && index[0] <= 'Z' {
+ val = staticVar[index[0]]
+ }
+ ps.st.push(val)
+ case tokens[q][0][:2] == "%'":
+ // Push a character constant.
+ con := tokens[q][4]
+ if len(con) > 1 {
+ errorStr := fmt.Sprintf("%s is not a valid character constant", con)
+ return buf.String(), errors.New(errorStr)
+ }
+ ps.st.push(con[0])
+ case tokens[q][0][:2] == "%{":
+ // Push an integer constant.
+ con, err := strconv.ParseInt(tokens[q][5], 10, 32)
+ if err != nil {
+ return buf.String(), err
+ }
+ ps.st.push(con)
+ case tokens[q][0][:2] == "%l":
+ // Push the length of the string that is popped from the stack.
+ popStr, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ if _, ok := popStr.(string); !ok {
+ return buf.String(), errors.New("Stack head is not a string")
+ }
+ ps.st.push(len(popStr.(string)))
+ case tokens[q][0][:2] == "%?":
+ // If-then-else construct. First, the whole string is identified and
+ // then inside this substring, we can specify which parts to switch on.
+ ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
+ ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
+ var (
+ ifStr string
+ err error
+ )
+ // Parse the if-part to determine if-else.
+ if len(ifTokens[1]) > 0 {
+ ifStr, err = ps.walk(ifTokens[1])
+ } else { // else
+ ifStr, err = ps.walk(ifTokens[4])
+ }
+ // Return any errors
+ if err != nil {
+ return buf.String(), err
+ } else if len(ifStr) > 0 {
+ // Self-defined limitation, not sure if this is correct, but didn't
+ // seem like it.
+ return buf.String(), errors.New("If-clause cannot print statements")
+ }
+ var thenStr string
+ // Pop the first value that is set by parsing the if-clause.
+ choose, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ // Switch to if or else.
+ if choose.(int) == 0 && len(ifTokens[1]) > 0 {
+ thenStr, err = ps.walk(ifTokens[3])
+ } else if choose.(int) != 0 {
+ if len(ifTokens[1]) > 0 {
+ thenStr, err = ps.walk(ifTokens[2])
+ } else {
+ thenStr, err = ps.walk(ifTokens[5])
+ }
+ }
+ if err != nil {
+ return buf.String(), err
+ }
+ buf.WriteString(thenStr)
+ case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
+ fallthrough
+ case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
+ fallthrough
+ case tokens[q][0][len(tokens[q][0])-1] == 'x':
+ fallthrough
+ case tokens[q][0][len(tokens[q][0])-1] == 'X':
+ fallthrough
+ case tokens[q][0][len(tokens[q][0])-1] == 's':
+ token := tokens[q][0]
+ // Remove the : that comes before a flag.
+ if token[1] == ':' {
+ token = token[:1] + token[2:]
+ }
+ digit, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ // The rest is determined like the normal formatted prints.
+ digitStr := fmt.Sprintf(token, digit.(int))
+ buf.WriteString(digitStr)
+ case tokens[q][0][:2] == "%i":
+ // Increment the parameters by one.
+ if len(ps.parameters) < 2 {
+ return buf.String(), errors.New("Not enough parameters to increment.")
+ }
+ val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
+ val1++
+ val2++
+ ps.parameters[0], ps.parameters[1] = val1, val2
+ default:
+ // The rest of the tokens is a special case, where two values are
+ // popped and then operated on by the token that comes after them.
+ op1, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ op2, err := ps.st.pop()
+ if err != nil {
+ return buf.String(), err
+ }
+ var result stacker
+ switch tokens[q][0][:2] {
+ case "%+":
+ // Addition
+ result = op2.(int) + op1.(int)
+ case "%-":
+ // Subtraction
+ result = op2.(int) - op1.(int)
+ case "%*":
+ // Multiplication
+ result = op2.(int) * op1.(int)
+ case "%/":
+ // Division
+ result = op2.(int) / op1.(int)
+ case "%m":
+ // Modulo
+ result = op2.(int) % op1.(int)
+ case "%&":
+ // Bitwise AND
+ result = op2.(int) & op1.(int)
+ case "%|":
+ // Bitwise OR
+ result = op2.(int) | op1.(int)
+ case "%^":
+ // Bitwise XOR
+ result = op2.(int) ^ op1.(int)
+ case "%=":
+ // Equals
+ result = op2 == op1
+ case "%>":
+ // Greater-than
+ result = op2.(int) > op1.(int)
+ case "%<":
+ // Lesser-than
+ result = op2.(int) < op1.(int)
+ case "%A":
+ // Logical AND
+ result = op2.(bool) && op1.(bool)
+ case "%O":
+ // Logical OR
+ result = op2.(bool) || op1.(bool)
+ case "%!":
+ // Logical complement
+ result = !op1.(bool)
+ case "%~":
+ // Bitwise complement
+ result = ^(op1.(int))
+ }
+ ps.st.push(result)
+ }
+
+ i = indices[q][1] - 1
+ q++
+ } else {
+ // We are not "inside" a token, so just skip until the end or the next
+ // token, and add all characters to the buffer.
+ j := i
+ if q != len(indices) {
+ for !(j >= indices[q][0] && j < indices[q][1]) {
+ j++
+ }
+ } else {
+ j = len(attr)
+ }
+ buf.WriteString(string(attr[i:j]))
+ i = j
+ }
+ }
+ // Return the buffer as a string.
+ return buf.String(), nil
+}
+
+// Push a stacker-value onto the stack.
+func (st *stack) push(s stacker) {
+ *st = append(*st, s)
+}
+
+// Pop a stacker-value from the stack.
+func (st *stack) pop() (stacker, error) {
+ if len(*st) == 0 {
+ return nil, errors.New("Stack is empty.")
+ }
+ newStack := make(stack, len(*st)-1)
+ val := (*st)[len(*st)-1]
+ copy(newStack, (*st)[:len(*st)-1])
+ *st = newStack
+ return val, nil
+}
+
+// Initialize regexes and the static vars (that don't get changed between
+// calls).
+func init() {
+ // Initialize the main regex.
+ expStr := strings.Join(exp[:], "|")
+ regex, _ = regexp.Compile(expStr)
+ // Initialize the static variables.
+ staticVar = make(map[byte]stacker, 26)
+}
diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go
new file mode 100644
index 000000000..9bcc65e9b
--- /dev/null
+++ b/vendor/github.com/Nvveen/Gotty/types.go
@@ -0,0 +1,23 @@
+// Copyright 2012 Neal van Veen. All rights reserved.
+// Usage of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package gotty
+
+type TermInfo struct {
+ boolAttributes map[string]bool
+ numAttributes map[string]int16
+ strAttributes map[string]string
+ // The various names of the TermInfo file.
+ Names []string
+}
+
+type stacker interface {
+}
+type stack []stacker
+
+type parser struct {
+ st stack
+ parameters []stacker
+ dynamicVar map[byte]stacker
+}
diff --git a/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE
new file mode 100644
index 000000000..8f71f43fe
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md
new file mode 100644
index 000000000..0e91ce07b
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/README.md
@@ -0,0 +1,74 @@
+# continuity
+
+[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
+[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
+
+A transport-agnostic, filesystem metadata manifest system
+
+This project is a staging area for experiments in providing transport agnostic
+metadata storage.
+
+Please see https://github.com/opencontainers/specs/issues/11 for more details.
+
+## Manifest Format
+
+A continuity manifest encodes filesystem metadata in Protocol Buffers.
+Please refer to [proto/manifest.proto](proto/manifest.proto).
+
+## Usage
+
+Build:
+
+```console
+$ make
+```
+
+Create a manifest (of this repo itself):
+
+```console
+$ ./bin/continuity build . > /tmp/a.pb
+```
+
+Dump a manifest:
+
+```console
+$ ./bin/continuity ls /tmp/a.pb
+...
+-rw-rw-r-- 270 B /.gitignore
+-rw-rw-r-- 88 B /.mailmap
+-rw-rw-r-- 187 B /.travis.yml
+-rw-rw-r-- 359 B /AUTHORS
+-rw-rw-r-- 11 kB /LICENSE
+-rw-rw-r-- 1.5 kB /Makefile
+...
+-rw-rw-r-- 986 B /testutil_test.go
+drwxrwxr-x 0 B /version
+-rw-rw-r-- 478 B /version/version.go
+```
+
+Verify a manifest:
+
+```console
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+Break the directory and restore using the manifest:
+```console
+$ chmod 777 Makefile
+$ ./bin/continuity verify . /tmp/a.pb
+2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
+$ ./bin/continuity apply . /tmp/a.pb
+$ stat -c %a Makefile
+664
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+
+## Contribution Guide
+### Building Proto Package
+
+If you change the proto file you will need to rebuild the generated Go with `go generate`.
+
+```console
+$ go generate ./proto
+```
diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
new file mode 100644
index 000000000..b43d55fe9
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
@@ -0,0 +1,85 @@
+package pathdriver
+
+import (
+ "path/filepath"
+)
+
+// PathDriver provides all of the path manipulation functions in a common
+// interface. The context should call these and never use the `filepath`
+// package or any other package to manipulate paths.
+type PathDriver interface {
+ Join(paths ...string) string
+ IsAbs(path string) bool
+ Rel(base, target string) (string, error)
+ Base(path string) string
+ Dir(path string) string
+ Clean(path string) string
+ Split(path string) (dir, file string)
+ Separator() byte
+ Abs(path string) (string, error)
+ Walk(string, filepath.WalkFunc) error
+ FromSlash(path string) string
+ ToSlash(path string) string
+ Match(pattern, name string) (matched bool, err error)
+}
+
+// pathDriver is a simple default implementation that calls the filepath package.
+type pathDriver struct{}
+
+// LocalPathDriver is the exported pathDriver struct for convenience.
+var LocalPathDriver PathDriver = &pathDriver{}
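+
+// Illustrative use (not part of the vendored source): callers go through the
+// driver instead of calling filepath directly, e.g.
+//   p := LocalPathDriver.Join("a", "b") // "a/b" with the local separator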
+
+func (*pathDriver) Join(paths ...string) string {
+ return filepath.Join(paths...)
+}
+
+func (*pathDriver) IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+func (*pathDriver) Rel(base, target string) (string, error) {
+ return filepath.Rel(base, target)
+}
+
+func (*pathDriver) Base(path string) string {
+ return filepath.Base(path)
+}
+
+func (*pathDriver) Dir(path string) string {
+ return filepath.Dir(path)
+}
+
+func (*pathDriver) Clean(path string) string {
+ return filepath.Clean(path)
+}
+
+func (*pathDriver) Split(path string) (dir, file string) {
+ return filepath.Split(path)
+}
+
+func (*pathDriver) Separator() byte {
+ return filepath.Separator
+}
+
+func (*pathDriver) Abs(path string) (string, error) {
+ return filepath.Abs(path)
+}
+
+// Note that filepath.Walk calls os.Stat, so if the context wants to
+// call Driver.Stat() for Walk, it needs to create a new struct that
+// overrides this method.
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
+ return filepath.Walk(root, walkFn)
+}
+
+func (*pathDriver) FromSlash(path string) string {
+ return filepath.FromSlash(path)
+}
+
+func (*pathDriver) ToSlash(path string) string {
+ return filepath.ToSlash(path)
+}
+
+func (*pathDriver) Match(pattern, name string) (bool, error) {
+ return filepath.Match(pattern, name)
+}
diff --git a/vendor/github.com/containerd/continuity/vendor.conf b/vendor/github.com/containerd/continuity/vendor.conf
new file mode 100644
index 000000000..7c80deec5
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/vendor.conf
@@ -0,0 +1,13 @@
+bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
+github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
+github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
+github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
+github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
+github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
+github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
+golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
+golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
+golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
+golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
index 6e462aeda..d0229e038 100644
--- a/vendor/github.com/docker/docker/api/common.go
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -1,65 +1,11 @@
package api
-import (
- "encoding/json"
- "encoding/pem"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/system"
- "github.com/docker/libtrust"
-)
-
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
- DefaultVersion string = "1.32"
+ DefaultVersion string = "1.34"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier string = "scratch"
)
-
-// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
-// otherwise generates a new one
-func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
- err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
- if err != nil {
- return nil, err
- }
- trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
- if err == libtrust.ErrKeyFileDoesNotExist {
- trustKey, err = libtrust.GenerateECP256PrivateKey()
- if err != nil {
- return nil, fmt.Errorf("Error generating key: %s", err)
- }
- encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
- if err != nil {
- return nil, fmt.Errorf("Error serializing key: %s", err)
- }
- if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
- return nil, fmt.Errorf("Error saving key file: %s", err)
- }
- } else if err != nil {
- return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
- }
- return trustKey, nil
-}
-
-func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
- if ext == ".json" || ext == ".jwk" {
- encoded, err = json.Marshal(key)
- if err != nil {
- return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
- }
- } else {
- pemBlock, err := key.PEMBlock()
- if err != nil {
- return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
- }
- encoded = pem.EncodeToMemory(pemBlock)
- }
- return
-}
diff --git a/vendor/github.com/docker/docker/api/names.go b/vendor/github.com/docker/docker/api/names.go
deleted file mode 100644
index f147d1f4c..000000000
--- a/vendor/github.com/docker/docker/api/names.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package api
-
-import "regexp"
-
-// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
-const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
-
-// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
-var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index 18a1263f1..4ca9ccac7 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -181,7 +181,7 @@ type ImageBuildOptions struct {
SessionID string
// TODO @jhowardmsft LCOW Support: This will require extending to include
- // `Platform string`, but is ommited for now as it's hard-coded temporarily
+ // `Platform string`, but is omitted for now as it's hard-coded temporarily
// to avoid API changes.
}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index 9fea9eb04..bb421b388 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -23,41 +23,46 @@ func (i Isolation) IsDefault() bool {
// IpcMode represents the container ipc stack.
type IpcMode string
-// IsPrivate indicates whether the container uses its private ipc stack.
+// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
func (n IpcMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
+ return n == "private"
}
-// IsHost indicates whether the container uses the host's ipc stack.
+// IsHost indicates whether the container shares the host's ipc namespace.
func (n IpcMode) IsHost() bool {
return n == "host"
}
-// IsContainer indicates whether the container uses a container's ipc stack.
+// IsShareable indicates whether the container's ipc namespace can be shared with another container.
+func (n IpcMode) IsShareable() bool {
+ return n == "shareable"
+}
+
+// IsContainer indicates whether the container uses another container's ipc namespace.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
-// Valid indicates whether the ipc stack is valid.
+// IsNone indicates whether container IpcMode is set to "none".
+func (n IpcMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsEmpty indicates whether container IpcMode is empty
+func (n IpcMode) IsEmpty() bool {
+ return n == ""
+}
+
+// Valid indicates whether the ipc mode is valid.
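+// Valid modes are "", "none", "private", "shareable", "host", and "container:<name|id>".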
func (n IpcMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- case "container":
- if len(parts) != 2 || parts[1] == "" {
- return false
- }
- default:
- return false
- }
- return true
+ return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
}
// Container returns the name of the container ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
+ if len(parts) > 1 && parts[0] == "container" {
return parts[1]
}
return ""
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
index beec3d494..d45d0528f 100644
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -1,38 +1,45 @@
-// Package filters provides helper function to parse and handle command line
-// filter, used for example in docker ps or docker images commands.
+/*Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
package filters
import (
"encoding/json"
"errors"
- "fmt"
"regexp"
"strings"
"github.com/docker/docker/api/types/versions"
)
-// Args stores filter arguments as map key:{map key: bool}.
-// It contains an aggregation of the map of arguments (which are in the form
-// of -f 'key=value') based on the key, and stores values for the same key
-// in a map with string keys and boolean values.
-// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
-// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+// Args stores a mapping of keys to a set of multiple values.
type Args struct {
fields map[string]map[string]bool
}
-// NewArgs initializes a new Args struct.
-func NewArgs() Args {
- return Args{fields: map[string]map[string]bool{}}
+// KeyValuePair is used to initialize a new Args
+type KeyValuePair struct {
+ Key string
+ Value string
}
-// ParseFlag parses the argument to the filter flag. Like
-//
-// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+ return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+ args := Args{fields: map[string]map[string]bool{}}
+ for _, arg := range initialArgs {
+ args.Add(arg.Key, arg.Value)
+ }
+ return args
+}
+
+// ParseFlag parses a key=value string and adds it to an Args.
//
-// If prev map is provided, then it is appended to, and returned. By default a new
-// map is created.
+// Deprecated: Use Args.Add()
func ParseFlag(arg string, prev Args) (Args, error) {
filters := prev
if len(arg) == 0 {
@@ -53,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) {
return filters, nil
}
-// ErrBadFormat is an error returned in case of bad format for a filter.
+// ErrBadFormat is an error returned when a filter is not in the form key=value
+//
+// Deprecated: this error will be removed in a future version
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
-// ToParam packs the Args into a string for easy transport from client to server.
+// ToParam encodes the Args as a JSON encoded string
+//
+// Deprecated: use ToJSON
func ToParam(a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
- if a.Len() == 0 {
- return "", nil
+ return ToJSON(a)
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+ if len(args.fields) == 0 {
+ return []byte{}, nil
}
+ return json.Marshal(args.fields)
+}
- buf, err := json.Marshal(a.fields)
- if err != nil {
- return "", err
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
}
- return string(buf), nil
+ buf, err := json.Marshal(a)
+ return string(buf), err
}
-// ToParamWithVersion packs the Args into a string for easy transport from client to server.
-// The generated string will depend on the specified version (corresponding to the API version).
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: Use ToJSON
func ToParamWithVersion(version string, a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
- // for daemons older than v1.10, filter must be of the form map[string][]string
- var buf []byte
- var err error
if version != "" && versions.LessThan(version, "1.22") {
- buf, err = json.Marshal(convertArgsToSlice(a.fields))
- } else {
- buf, err = json.Marshal(a.fields)
- }
- if err != nil {
- return "", err
+ buf, err := json.Marshal(convertArgsToSlice(a.fields))
+ return string(buf), err
}
- return string(buf), nil
+
+ return ToJSON(a)
}
-// FromParam unpacks the filter Args.
+// FromParam decodes a JSON encoded string into Args
+//
+// Deprecated: use FromJSON
func FromParam(p string) (Args, error) {
- if len(p) == 0 {
- return NewArgs(), nil
+ return FromJSON(p)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+ args := NewArgs()
+
+ if p == "" {
+ return args, nil
}
- r := strings.NewReader(p)
- d := json.NewDecoder(r)
+ raw := []byte(p)
+ err := json.Unmarshal(raw, &args)
+ if err == nil {
+ return args, nil
+ }
- m := map[string]map[string]bool{}
- if err := d.Decode(&m); err != nil {
- r.Seek(0, 0)
-
- // Allow parsing old arguments in slice format.
- // Because other libraries might be sending them in this format.
- deprecated := map[string][]string{}
- if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
- m = deprecatedArgs(deprecated)
- } else {
- return NewArgs(), err
- }
+ // Fallback to parsing arguments in the legacy slice format
+ deprecated := map[string][]string{}
+ if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+ return args, err
+ }
+
+ args.fields = deprecatedArgs(deprecated)
+ return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON encoded bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+ if len(raw) == 0 {
+ return nil
}
- return Args{m}, nil
+ return json.Unmarshal(raw, &args.fields)
}
-// Get returns the list of values associates with a field.
-// It returns a slice of strings to keep backwards compatibility with old code.
-func (filters Args) Get(field string) []string {
- values := filters.fields[field]
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+ values := args.fields[key]
if values == nil {
return make([]string, 0)
}
@@ -131,37 +159,34 @@ func (filters Args) Get(field string) []string {
return slice
}
-// Add adds a new value to a filter field.
-func (filters Args) Add(name, value string) {
- if _, ok := filters.fields[name]; ok {
- filters.fields[name][value] = true
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ args.fields[key][value] = true
} else {
- filters.fields[name] = map[string]bool{value: true}
+ args.fields[key] = map[string]bool{value: true}
}
}
-// Del removes a value from a filter field.
-func (filters Args) Del(name, value string) {
- if _, ok := filters.fields[name]; ok {
- delete(filters.fields[name], value)
- if len(filters.fields[name]) == 0 {
- delete(filters.fields, name)
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ delete(args.fields[key], value)
+ if len(args.fields[key]) == 0 {
+ delete(args.fields, key)
}
}
}
-// Len returns the number of fields in the arguments.
-func (filters Args) Len() int {
- return len(filters.fields)
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+ return len(args.fields)
}
-// MatchKVList returns true if the values for the specified field matches the ones
-// from the sources.
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'label' and sources are {'label1': '1', 'label2': '2'}
-// it returns true.
-func (filters Args) MatchKVList(field string, sources map[string]string) bool {
- fieldValues := filters.fields[field]
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+ fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
@@ -172,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return false
}
- for name2match := range fieldValues {
- testKV := strings.SplitN(name2match, "=", 2)
+ for value := range fieldValues {
+ testKV := strings.SplitN(value, "=", 2)
v, ok := sources[testKV[0]]
if !ok {
@@ -187,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return true
}
-// Match returns true if the values for the specified field matches the source string
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'image.name' and source is 'ubuntu'
-// it returns true.
-func (filters Args) Match(field, source string) bool {
- if filters.ExactMatch(field, source) {
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+ if args.ExactMatch(field, source) {
return true
}
- fieldValues := filters.fields[field]
+ fieldValues := args.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
@@ -209,9 +231,9 @@ func (filters Args) Match(field, source string) bool {
return false
}
-// ExactMatch returns true if the source matches exactly one of the filters.
-func (filters Args) ExactMatch(field, source string) bool {
- fieldValues, ok := filters.fields[field]
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+ fieldValues, ok := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
@@ -221,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool {
return fieldValues[source]
}
-// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool {
- fieldValues := filters.fields[field]
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+ fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
- if len(filters.fields[field]) != 1 {
+ if len(args.fields[key]) != 1 {
return false
}
@@ -236,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool {
return fieldValues[source]
}
-// FuzzyMatch returns true if the source matches exactly one of the filters,
-// or the source has one of the filters as a prefix.
-func (filters Args) FuzzyMatch(field, source string) bool {
- if filters.ExactMatch(field, source) {
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+ if args.ExactMatch(key, source) {
return true
}
- fieldValues := filters.fields[field]
+ fieldValues := args.fields[key]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
@@ -252,30 +275,47 @@ func (filters Args) FuzzyMatch(field, source string) bool {
return false
}
-// Include returns true if the name of the field to filter is in the filters.
-func (filters Args) Include(field string) bool {
- _, ok := filters.fields[field]
+// Include returns true if the key exists in the mapping
+//
+// Deprecated: use Contains
+func (args Args) Include(field string) bool {
+ _, ok := args.fields[field]
+ return ok
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+ _, ok := args.fields[field]
return ok
}
-// Validate ensures that all the fields in the filter are valid.
-// It returns an error as soon as it finds an invalid field.
-func (filters Args) Validate(accepted map[string]bool) error {
- for name := range filters.fields {
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+ return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+ for name := range args.fields {
if !accepted[name] {
- return fmt.Errorf("Invalid filter '%s'", name)
+ return invalidFilter(name)
}
}
return nil
}
-// WalkValues iterates over the list of filtered values for a field.
-// It stops the iteration if it finds an error and it returns that error.
-func (filters Args) WalkValues(field string, op func(value string) error) error {
- if _, ok := filters.fields[field]; !ok {
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := args.fields[field]; !ok {
return nil
}
- for v := range filters.fields[field] {
+ for v := range args.fields[field] {
if err := op(v); err != nil {
return err
}
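
Taken together, the new filters API replaces ParseFlag/ToParam/FromParam with constructor arguments plus JSON round-tripping. A minimal sketch (not part of this commit) using only identifiers from the hunk above:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
    )

    func main() {
        // Build the mapping up front instead of mutating an empty Args.
        args := filters.NewArgs(
            filters.Arg("label", "app=web"),
            filters.Arg("label", "env=prod"),
            filters.Arg("dangling", "true"),
        )

        encoded, err := filters.ToJSON(args) // replaces the deprecated ToParam
        if err != nil {
            panic(err)
        }

        decoded, err := filters.FromJSON(encoded) // replaces the deprecated FromParam
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded.Contains("label"), decoded.Get("label"))
    }
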
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
index 2744f85d6..b7d133cd8 100644
--- a/vendor/github.com/docker/docker/api/types/mount/mount.go
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -15,6 +15,8 @@ const (
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
+ // TypeNamedPipe is the type for mounting Windows named pipes
+ TypeNamedPipe Type = "npipe"
)
// Mount represents a mount (volume).
@@ -65,7 +67,7 @@ var Propagations = []Propagation{
type Consistency string
const (
- // ConsistencyFull guarantees bind-mount-like consistency
+ // ConsistencyFull guarantees bind mount-like consistency
ConsistencyFull Consistency = "consistent"
// ConsistencyCached mounts can cache read data and FS structure
ConsistencyCached Consistency = "cached"
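
A short sketch of the new constant in use; the Type/Source/Target field names are assumed from the mount.Mount struct in this package rather than shown in the hunk:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/mount"
    )

    func main() {
        // A Windows named pipe mount expressed with the new TypeNamedPipe.
        m := mount.Mount{
            Type:   mount.TypeNamedPipe,
            Source: `\\.\pipe\docker_engine`,
            Target: `\\.\pipe\docker_engine`,
        }
        fmt.Println(m.Type, m.Source)
    }
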
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
index 9aa9702da..ed9c1168b 100644
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
}
var format string
- var parseInLocation bool
-
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
- parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+ parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
if strings.Contains(value, ".") {
if parseInLocation {
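
For reference, a hedged sketch of the two parsing paths the simplified declaration selects between; the exact timestamp format returned is not shown in this hunk, so the printed values are illustrative only:

    package main

    import (
        "fmt"
        "time"

        timetypes "github.com/docker/docker/api/types/time"
    )

    func main() {
        // Contains 'Z', so it is parsed as an absolute (non-local) time.
        utc, err := timetypes.GetTimestamp("2018-04-25T13:26:52Z", time.Now())
        fmt.Println(utc, err)

        // No zone marker and fewer than three dashes: parsed in the local zone.
        local, err := timetypes.GetTimestamp("13:26:52", time.Now())
        fmt.Println(local, err)
    }
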
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go
new file mode 100644
index 000000000..f23c6874b
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go
@@ -0,0 +1,46 @@
+// Package command contains the set of Dockerfile commands.
+package command
+
+// Define constants for the command strings
+const (
+ Add = "add"
+ Arg = "arg"
+ Cmd = "cmd"
+ Copy = "copy"
+ Entrypoint = "entrypoint"
+ Env = "env"
+ Expose = "expose"
+ From = "from"
+ Healthcheck = "healthcheck"
+ Label = "label"
+ Maintainer = "maintainer"
+ Onbuild = "onbuild"
+ Run = "run"
+ Shell = "shell"
+ StopSignal = "stopsignal"
+ User = "user"
+ Volume = "volume"
+ Workdir = "workdir"
+)
+
+// Commands is the list of all Dockerfile commands
+var Commands = map[string]struct{}{
+ Add: {},
+ Arg: {},
+ Cmd: {},
+ Copy: {},
+ Entrypoint: {},
+ Env: {},
+ Expose: {},
+ From: {},
+ Healthcheck: {},
+ Label: {},
+ Maintainer: {},
+ Onbuild: {},
+ Run: {},
+ Shell: {},
+ StopSignal: {},
+ User: {},
+ Volume: {},
+ Workdir: {},
+}
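
Because the map doubles as a membership test, a minimal sketch (not part of this commit) of validating an instruction name against it:

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/builder/dockerfile/command"
    )

    func main() {
        for _, name := range []string{"RUN", "HEALTHCHECK", "INCLUDE"} {
            // Instruction names are stored lowercase in command.Commands.
            _, ok := command.Commands[strings.ToLower(name)]
            fmt.Println(name, ok) // RUN true, HEALTHCHECK true, INCLUDE false
        }
    }
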
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go
new file mode 100644
index 000000000..2c375b74e
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go
@@ -0,0 +1,399 @@
+package parser
+
+// line parsers are dispatch calls that parse a single unit of text into a
+// Node object which contains the whole statement. Dockerfiles have varied
+// (but not usually unique, see ONBUILD for a unique example) parsing rules
+// per-command, and these unify the processing in a way that makes it
+// manageable.
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+)
+
+var (
+ errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
+)
+
+const (
+ commandLabel = "LABEL"
+)
+
+// ignore the current argument. This will still leave a command parsed, but
+// will not incorporate the arguments into the ast.
+func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
+ return &Node{}, nil, nil
+}
+
+// used for onbuild. Could potentially be used for anything that represents a
+// statement with sub-statements.
+//
+// ONBUILD RUN foo bar -> (onbuild (run foo bar))
+//
+func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ child, err := newNodeFromLine(rest, d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Children: []*Node{child}}, nil, nil
+}
+
+// helper to parse words (i.e. space-delimited or quoted strings) in a statement.
+// The quotes are preserved as part of this function and they are stripped later
+// as part of processWords().
+func parseWords(rest string, d *Directive) []string {
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+ var chWidth int
+
+ for pos := 0; pos <= len(rest); pos += chWidth {
+ if pos != len(rest) {
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(rest) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+ phase = inWord // found it, fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ }
+ if ch == d.escapeToken {
+ if pos+chWidth == len(rest) {
+ continue // just skip an escape token at end of line
+ }
+ // If we're not quoted and we see an escape token, then always just
+ // add the escape token plus the char to the word, even if the char
+ // is a quote.
+ word += string(ch)
+ pos += chWidth
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ }
+ // The escape token is special except for ' quotes - can't escape anything for '
+ if ch == d.escapeToken && quote != '\'' {
+ if pos+chWidth == len(rest) {
+ phase = inWord
+ continue // just skip the escape token at end
+ }
+ pos += chWidth
+ word += string(ch)
+ ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
+ }
+ word += string(ch)
+ }
+ }
+
+ return words
+}
+
+// parse environment-like statements. Note that this does *not* handle
+// variable interpolation, which will be handled in the evaluator.
+func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
+ // This is kind of tricky because we need to support the old
+ // variant: KEY name value
+ // as well as the new one: KEY name=value ...
+ // The trigger to know which one is being used will be whether we hit
+ // a space or = first. space ==> old, "=" ==> new
+
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil
+ }
+
+ // Old format (KEY name value)
+ if !strings.Contains(words[0], "=") {
+ parts := tokenWhitespace.Split(rest, 2)
+ if len(parts) < 2 {
+ return nil, fmt.Errorf(key + " must have two arguments")
+ }
+ return newKeyValueNode(parts[0], parts[1]), nil
+ }
+
+ var rootNode *Node
+ var prevNode *Node
+ for _, word := range words {
+ if !strings.Contains(word, "=") {
+ return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
+ }
+
+ parts := strings.SplitN(word, "=", 2)
+ node := newKeyValueNode(parts[0], parts[1])
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return rootNode, nil
+}
+
+func newKeyValueNode(key, value string) *Node {
+ return &Node{
+ Value: key,
+ Next: &Node{Value: value},
+ }
+}
+
+func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
+ if rootNode == nil {
+ rootNode = node
+ }
+ if prevNode != nil {
+ prevNode.Next = node
+ }
+
+ prevNode = node.Next
+ return rootNode, prevNode
+}
+
+func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, "ENV", d)
+ return node, nil, err
+}
+
+func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, err := parseNameVal(rest, commandLabel, d)
+ return node, nil, err
+}
+
+// NodeFromLabels returns a Node for the injected labels
+func NodeFromLabels(labels map[string]string) *Node {
+ keys := []string{}
+ for key := range labels {
+ keys = append(keys, key)
+ }
+ // Sort the labels to have a repeatable order
+ sort.Strings(keys)
+
+ labelPairs := []string{}
+ var rootNode *Node
+ var prevNode *Node
+ for _, key := range keys {
+ value := labels[key]
+ labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
+ // Value must be single quoted to prevent env variable expansion
+ // See https://github.com/docker/docker/issues/26027
+ node := newKeyValueNode(key, "'"+value+"'")
+ rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
+ }
+
+ return &Node{
+ Value: command.Label,
+ Original: commandLabel + " " + strings.Join(labelPairs, " "),
+ Next: rootNode,
+ }
+}
+
+// parses a statement containing one or more keyword definition(s) and/or
+// value assignments, like `name1 name2= name3="" name4=value`.
+// Note that this is a stricter format than the old format of assignment,
+// allowed by parseNameVal(), in a way that this only allows assignment of the
+// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
+// above. And the assignments `name2=` and `name3=""` are equivalent and
+// assign an empty value to the respective keywords.
+func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
+ words := parseWords(rest, d)
+ if len(words) == 0 {
+ return nil, nil, nil
+ }
+
+ var (
+ rootnode *Node
+ prevNode *Node
+ )
+ for i, word := range words {
+ node := &Node{}
+ node.Value = word
+ if i == 0 {
+ rootnode = node
+ } else {
+ prevNode.Next = node
+ }
+ prevNode = node
+ }
+
+ return rootnode, nil, nil
+}
+
+// parses a whitespace-delimited set of arguments. The result is effectively a
+// linked list of string arguments.
+func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node := &Node{}
+ rootnode := node
+ prevnode := node
+ for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
+ prevnode = node
+ node.Value = str
+ node.Next = &Node{}
+ node = node.Next
+ }
+
+ // XXX to get around regexp.Split *always* providing an empty string at the
+ // end due to how our loop is constructed, nil out the last node in the
+ // chain.
+ prevnode.Next = nil
+
+ return rootnode, nil, nil
+}
+
+// parseString just wraps the string in quotes and returns a working node.
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+ n := &Node{}
+ n.Value = rest
+ return n, nil, nil
+}
+
+// parseJSON converts JSON arrays to an AST.
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
+ if !strings.HasPrefix(rest, "[") {
+ return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
+ }
+
+ var myJSON []interface{}
+ if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
+ return nil, nil, err
+ }
+
+ var top, prev *Node
+ for _, str := range myJSON {
+ s, ok := str.(string)
+ if !ok {
+ return nil, nil, errDockerfileNotStringArray
+ }
+
+ node := &Node{Value: s}
+ if prev == nil {
+ top = node
+ } else {
+ prev.Next = node
+ }
+ prev = node
+ }
+
+ return top, map[string]bool{"json": true}, nil
+}
+
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, quotes the result and returns a single
+// node.
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
+ if rest == "" {
+ return nil, nil, nil
+ }
+
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ node = &Node{}
+ node.Value = rest
+ return node, nil, nil
+}
+
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace
+// delimited string.
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
+ node, attrs, err := parseJSON(rest, d)
+
+ if err == nil {
+ return node, attrs, nil
+ }
+ if err == errDockerfileNotStringArray {
+ return nil, nil, err
+ }
+
+ return parseStringsWhitespaceDelimited(rest, d)
+}
+
+// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
+func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
+ // Find end of first argument
+ var sep int
+ for ; sep < len(rest); sep++ {
+ if unicode.IsSpace(rune(rest[sep])) {
+ break
+ }
+ }
+ next := sep
+ for ; next < len(rest); next++ {
+ if !unicode.IsSpace(rune(rest[next])) {
+ break
+ }
+ }
+
+ if sep == 0 {
+ return nil, nil, nil
+ }
+
+ typ := rest[:sep]
+ cmd, attrs, err := parseMaybeJSON(rest[next:], d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Node{Value: typ, Next: cmd}, attrs, err
+}
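
These parsers are unexported and surface through Parse (added below); still, a hypothetical in-package test sketch makes the parseNameVal contract concrete: the returned chain alternates key and value nodes:

    package parser

    import "testing"

    // Hypothetical in-package test; parseNameVal and NewDefaultDirective are
    // the functions defined in this package.
    func TestParseNameValSketch(t *testing.T) {
        node, err := parseNameVal("a=1 b=2", "ENV", NewDefaultDirective())
        if err != nil {
            t.Fatal(err)
        }
        var got []string
        for n := node; n != nil; n = n.Next {
            got = append(got, n.Value) // a, 1, b, 2
        }
        if len(got) != 4 {
            t.Fatalf("expected 4 alternating key/value nodes, got %v", got)
        }
    }
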
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
new file mode 100644
index 000000000..42a84c630
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
@@ -0,0 +1,363 @@
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
+package parser
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/pkg/system"
+ "github.com/pkg/errors"
+)
+
+// Node is a structure used to represent a parse tree.
+//
+// In the node there are three fields, Value, Next, and Children. Value is the
+// current token's string value. Next is always the next non-child token, and
+// children contains all the children. Here's an example:
+//
+// (value next (child child-next child-next-next) next-next)
+//
+// This data structure is frankly pretty lousy for handling complex languages,
+// but lucky for us the Dockerfile isn't very complicated. This structure
+// works a little more effectively than a "proper" parse tree for our needs.
+//
+type Node struct {
+ Value string // actual content
+ Next *Node // the next item in the current sexp
+ Children []*Node // the children of this sexp
+ Attributes map[string]bool // special attributes for this node
+ Original string // original line used before parsing
+ Flags []string // only top Node should have this set
+ StartLine int // the line in the original dockerfile where the node begins
+ endLine int // the line in the original dockerfile where the node ends
+}
+
+// Dump dumps the AST defined by `node` as a list of sexps.
+// Returns a string suitable for printing.
+func (node *Node) Dump() string {
+ str := ""
+ str += node.Value
+
+ if len(node.Flags) > 0 {
+ str += fmt.Sprintf(" %q", node.Flags)
+ }
+
+ for _, n := range node.Children {
+ str += "(" + n.Dump() + ")\n"
+ }
+
+ for n := node.Next; n != nil; n = n.Next {
+ if len(n.Children) > 0 {
+ str += " " + n.Dump()
+ } else {
+ str += " " + strconv.Quote(n.Value)
+ }
+ }
+
+ return strings.TrimSpace(str)
+}
+
+func (node *Node) lines(start, end int) {
+ node.StartLine = start
+ node.endLine = end
+}
+
+// AddChild adds a new child node, and updates line information
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
+ child.lines(startLine, endLine)
+ if node.StartLine < 0 {
+ node.StartLine = startLine
+ }
+ node.endLine = endLine
+ node.Children = append(node.Children, child)
+}
+
+var (
+ dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
+ tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
+ tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+ tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
+ tokenComment = regexp.MustCompile(`^#.*$`)
+)
+
+// DefaultEscapeToken is the default escape token
+const DefaultEscapeToken = '\\'
+
+// defaultPlatformToken is the platform assumed for the build if not explicitly provided
+var defaultPlatformToken = runtime.GOOS
+
+// Directive is the structure used during a build run to hold the state of
+// parsing directives.
+type Directive struct {
+ escapeToken rune // Current escape token
+ platformToken string // Current platform token
+ lineContinuationRegex *regexp.Regexp // Current line continuation regex
+ processingComplete bool // Whether we are done looking for directives
+ escapeSeen bool // Whether the escape directive has been seen
+ platformSeen bool // Whether the platform directive has been seen
+}
+
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
+func (d *Directive) setEscapeToken(s string) error {
+ if s != "`" && s != "\\" {
+ return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
+ }
+ d.escapeToken = rune(s[0])
+ d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
+ return nil
+}
+
+// setPlatformToken sets the default platform for pulling images in a Dockerfile.
+func (d *Directive) setPlatformToken(s string) error {
+ s = strings.ToLower(s)
+ valid := []string{runtime.GOOS}
+ if system.LCOWSupported() {
+ valid = append(valid, "linux")
+ }
+ for _, item := range valid {
+ if s == item {
+ d.platformToken = s
+ return nil
+ }
+ }
+ return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
+}
+
+// possibleParserDirective looks for one or more parser directives '# escapeToken=<char>' and
+// '# platform=<string>'. Parser directives must precede any builder instruction
+// or other comments, and cannot be repeated.
+func (d *Directive) possibleParserDirective(line string) error {
+ if d.processingComplete {
+ return nil
+ }
+
+ tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tecMatch) != 0 {
+ for i, n := range tokenEscapeCommand.SubexpNames() {
+ if n == "escapechar" {
+ if d.escapeSeen {
+ return errors.New("only one escape parser directive can be used")
+ }
+ d.escapeSeen = true
+ return d.setEscapeToken(tecMatch[i])
+ }
+ }
+ }
+
+ // TODO @jhowardmsft LCOW Support: Eventually this check can be removed,
+ // but only recognise a platform token if running in LCOW mode.
+ if system.LCOWSupported() {
+ tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
+ if len(tpcMatch) != 0 {
+ for i, n := range tokenPlatformCommand.SubexpNames() {
+ if n == "platform" {
+ if d.platformSeen {
+ return errors.New("only one platform parser directive can be used")
+ }
+ d.platformSeen = true
+ return d.setPlatformToken(tpcMatch[i])
+ }
+ }
+ }
+ }
+
+ d.processingComplete = true
+ return nil
+}
+
+// NewDefaultDirective returns a new Directive with the default escape token
+func NewDefaultDirective() *Directive {
+ directive := Directive{}
+ directive.setEscapeToken(string(DefaultEscapeToken))
+ directive.setPlatformToken(defaultPlatformToken)
+ return &directive
+}
+
+func init() {
+ // Dispatch Table. see line_parsers.go for the parse functions.
+ // The command is parsed and mapped to the line parser. The line parser
+ // receives the arguments but not the command, and returns an AST after
+ // reformulating the arguments according to the rules in the parser
+ // functions. Errors are propagated up by Parse() and the resulting AST can
+ // be incorporated directly into the existing AST as a next.
+ dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
+ command.Add: parseMaybeJSONToList,
+ command.Arg: parseNameOrNameVal,
+ command.Cmd: parseMaybeJSON,
+ command.Copy: parseMaybeJSONToList,
+ command.Entrypoint: parseMaybeJSON,
+ command.Env: parseEnv,
+ command.Expose: parseStringsWhitespaceDelimited,
+ command.From: parseStringsWhitespaceDelimited,
+ command.Healthcheck: parseHealthConfig,
+ command.Label: parseLabel,
+ command.Maintainer: parseString,
+ command.Onbuild: parseSubCommand,
+ command.Run: parseMaybeJSON,
+ command.Shell: parseMaybeJSON,
+ command.StopSignal: parseString,
+ command.User: parseString,
+ command.Volume: parseMaybeJSONToList,
+ command.Workdir: parseString,
+ }
+}
+
+// newNodeFromLine splits the line into parts, and dispatches to a function
+// based on the command and command arguments. A Node is created from the
+// result of the dispatch.
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
+ cmd, flags, args, err := splitCommand(line)
+ if err != nil {
+ return nil, err
+ }
+
+ fn := dispatch[cmd]
+ // Ignore invalid Dockerfile instructions
+ if fn == nil {
+ fn = parseIgnore
+ }
+ next, attrs, err := fn(args, directive)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Node{
+ Value: cmd,
+ Original: line,
+ Flags: flags,
+ Next: next,
+ Attributes: attrs,
+ }, nil
+}
+
+// Result is the result of parsing a Dockerfile
+type Result struct {
+ AST *Node
+ EscapeToken rune
+ Platform string
+ Warnings []string
+}
+
+// PrintWarnings to the writer
+func (r *Result) PrintWarnings(out io.Writer) {
+ if len(r.Warnings) == 0 {
+ return
+ }
+ fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
+}
+
+// Parse reads lines from a Reader, parses the lines into an AST and returns
+// the AST and escape token
+func Parse(rwc io.Reader) (*Result, error) {
+ d := NewDefaultDirective()
+ currentLine := 0
+ root := &Node{StartLine: -1}
+ scanner := bufio.NewScanner(rwc)
+ warnings := []string{}
+
+ var err error
+ for scanner.Scan() {
+ bytesRead := scanner.Bytes()
+ if currentLine == 0 {
+ // First line, strip the byte-order-marker if present
+ bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
+ }
+ bytesRead, err = processLine(d, bytesRead, true)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ startLine := currentLine
+ line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
+ if isEndOfLine && line == "" {
+ continue
+ }
+
+ var hasEmptyContinuationLine bool
+ for !isEndOfLine && scanner.Scan() {
+ bytesRead, err := processLine(d, scanner.Bytes(), false)
+ if err != nil {
+ return nil, err
+ }
+ currentLine++
+
+ if isComment(scanner.Bytes()) {
+ // original line was a comment (processLine strips comments)
+ continue
+ }
+ if isEmptyContinuationLine(bytesRead) {
+ hasEmptyContinuationLine = true
+ continue
+ }
+
+ continuationLine := string(bytesRead)
+ continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
+ line += continuationLine
+ }
+
+ if hasEmptyContinuationLine {
+ warning := "[WARNING]: Empty continuation line found in:\n " + line
+ warnings = append(warnings, warning)
+ }
+
+ child, err := newNodeFromLine(line, d)
+ if err != nil {
+ return nil, err
+ }
+ root.AddChild(child, startLine, currentLine)
+ }
+
+ if len(warnings) > 0 {
+ warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
+ }
+ return &Result{
+ AST: root,
+ Warnings: warnings,
+ EscapeToken: d.escapeToken,
+ Platform: d.platformToken,
+ }, nil
+}
+
+func trimComments(src []byte) []byte {
+ return tokenComment.ReplaceAll(src, []byte{})
+}
+
+func trimWhitespace(src []byte) []byte {
+ return bytes.TrimLeftFunc(src, unicode.IsSpace)
+}
+
+func isComment(line []byte) bool {
+ return tokenComment.Match(trimWhitespace(line))
+}
+
+func isEmptyContinuationLine(line []byte) bool {
+ return len(trimWhitespace(line)) == 0
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
+ if d.lineContinuationRegex.MatchString(line) {
+ line = d.lineContinuationRegex.ReplaceAllString(line, "")
+ return line, false
+ }
+ return line, true
+}
+
+// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
+// to preserve whitespace on continuation lines. Why is that done?
+func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
+ if stripLeftWhitespace {
+ token = trimWhitespace(token)
+ }
+ return trimComments(token), d.possibleParserDirective(string(token))
+}
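
End to end, the exported surface of this file is Parse plus the Node accessors. A minimal sketch (not part of this commit) that parses a Dockerfile containing a continuation line and walks the resulting AST:

    package main

    import (
        "fmt"
        "os"
        "strings"

        "github.com/docker/docker/builder/dockerfile/parser"
    )

    func main() {
        dockerfile := "FROM alpine:3.7\n" +
            "RUN apk add --no-cache \\\n    curl\n" +
            "LABEL maintainer=\"someone@example.com\"\n"

        result, err := parser.Parse(strings.NewReader(dockerfile))
        if err != nil {
            panic(err)
        }
        result.PrintWarnings(os.Stderr)

        // Each instruction becomes a child of the root node; continuation
        // lines are joined before newNodeFromLine dispatches on the command.
        for _, child := range result.AST.Children {
            fmt.Printf("line %d: %s\n", child.StartLine, child.Dump())
        }
    }
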
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go
new file mode 100644
index 000000000..171f454f6
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/split_command.go
@@ -0,0 +1,118 @@
+package parser
+
+import (
+ "strings"
+ "unicode"
+)
+
+// splitCommand takes a single line of text and parses out the cmd and args,
+// which are used for dispatching to more exact parsing functions.
+func splitCommand(line string) (string, []string, string, error) {
+ var args string
+ var flags []string
+
+ // Make sure we get the same results irrespective of leading/trailing spaces
+ cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
+ cmd := strings.ToLower(cmdline[0])
+
+ if len(cmdline) == 2 {
+ var err error
+ args, flags, err = extractBuilderFlags(cmdline[1])
+ if err != nil {
+ return "", nil, "", err
+ }
+ }
+
+ return cmd, flags, strings.TrimSpace(args), nil
+}
+
+func extractBuilderFlags(line string) (string, []string, error) {
+ // Parses the BuilderFlags and returns the remaining part of the line
+
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+
+ for pos := 0; pos <= len(line); pos++ {
+ if pos != len(line) {
+ ch = rune(line[pos])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(line) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+
+ // Only keep going if the next word starts with --
+ if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+ return line[pos:], words, nil
+ }
+
+ phase = inWord // found something with "--", fall through
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+ if word != "--" && (blankOK || len(word) > 0) {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if word == "--" {
+ return line[pos:], words, nil
+ }
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ phase = inWord
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ }
+ }
+
+ return "", words, nil
+}
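
Both helpers here are unexported, so a hypothetical in-package sketch shows what splitCommand yields for an instruction carrying a builder flag:

    package parser

    import "fmt"

    // exampleSplitCommand is a hypothetical in-package helper; splitCommand
    // is the function defined above.
    func exampleSplitCommand() {
        cmd, flags, args, err := splitCommand("COPY --from=builder /src /dst")
        if err != nil {
            panic(err)
        }
        // cmd == "copy", flags == ["--from=builder"], args == "/src /dst"
        fmt.Println(cmd, flags, args)
    }
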
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
index ffe44bc97..9835bad5c 100644
--- a/vendor/github.com/docker/docker/client/checkpoint_list.go
+++ b/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -2,7 +2,6 @@ package client
import (
"encoding/json"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -20,10 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return checkpoints, containerNotFoundError{container}
- }
- return checkpoints, err
+ return checkpoints, wrapResponseError(err, resp, "container", container)
}
err = json.NewDecoder(resp.body).Decode(&checkpoints)
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index c4e3914b1..893124853 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -1,10 +1,6 @@
/*
Package client is a Go client for the Docker Engine API.
-The "docker" command uses this package to communicate with the daemon. It can also
-be used by your own Go applications to do anything the command-line interface does
-- running containers, pulling images, managing swarms, etc.
-
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/reference/api/
@@ -51,6 +47,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"strings"
@@ -159,7 +156,7 @@ func NewEnvClient() (*Client, error) {
// highly recommended that you set a version or your client may break if the
// server is upgraded.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
- proto, addr, basePath, err := ParseHost(host)
+ hostURL, err := ParseHostURL(host)
if err != nil {
return nil, err
}
@@ -170,7 +167,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
}
} else {
transport := new(http.Transport)
- sockets.ConfigureTransport(transport, proto, addr)
+ sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
client = &http.Client{
Transport: transport,
CheckRedirect: CheckRedirect,
@@ -188,28 +185,24 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
scheme = "https"
}
+ // TODO: store URL instead of proto/addr/basePath
return &Client{
scheme: scheme,
host: host,
- proto: proto,
- addr: addr,
- basePath: basePath,
+ proto: hostURL.Scheme,
+ addr: hostURL.Host,
+ basePath: hostURL.Path,
client: client,
version: version,
customHTTPHeaders: httpHeaders,
}, nil
}
-// Close ensures that transport.Client is closed
-// especially needed while using NewClient with *http.Client = nil
-// for example
-// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"})
+// Close the transport used by the client
func (cli *Client) Close() error {
-
if t, ok := cli.client.Transport.(*http.Transport); ok {
t.CloseIdleConnections()
}
-
return nil
}
@@ -219,37 +212,27 @@ func (cli *Client) getAPIPath(p string, query url.Values) string {
var apiPath string
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
- apiPath = cli.basePath + "/v" + v + p
+ apiPath = path.Join(cli.basePath, "/v"+v, p)
} else {
- apiPath = cli.basePath + p
- }
-
- u := &url.URL{
- Path: apiPath,
- }
- if len(query) > 0 {
- u.RawQuery = query.Encode()
+ apiPath = path.Join(cli.basePath, p)
}
- return u.String()
+ return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
}
-// ClientVersion returns the version string associated with this
-// instance of the Client. Note that this value can be changed
-// via the DOCKER_API_VERSION env var.
-// This operation doesn't acquire a mutex.
+// ClientVersion returns the API version used by this client.
func (cli *Client) ClientVersion() string {
return cli.version
}
-// NegotiateAPIVersion updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersion queries the API and updates the version to match the
+// API version. Any errors are silently ignored.
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
ping, _ := cli.Ping(ctx)
cli.NegotiateAPIVersionPing(ping)
}
-// NegotiateAPIVersionPing updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
+// if the ping version is less than the default version.
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
if cli.manualOverride {
return
@@ -265,23 +248,34 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
cli.version = api.DefaultVersion
}
- // if server version is lower than the maximum version supported by the Client, downgrade
- if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+ // if server version is lower than the client version, downgrade
+ if versions.LessThan(p.APIVersion, cli.version) {
cli.version = p.APIVersion
}
}
-// DaemonHost returns the host associated with this instance of the Client.
-// This operation doesn't acquire a mutex.
+// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
return cli.host
}
-// ParseHost verifies that the given host strings is valid.
+// ParseHost parses a url string, validates the string is a host url, and returns
+// the parsed host as: protocol, address, and base path
+// Deprecated: use ParseHostURL
func ParseHost(host string) (string, string, string, error) {
+ hostURL, err := ParseHostURL(host)
+ if err != nil {
+ return "", "", "", err
+ }
+ return hostURL.Scheme, hostURL.Host, hostURL.Path, nil
+}
+
+// ParseHostURL parses a url string, validates the string is a host url, and
+// returns the parsed URL
+func ParseHostURL(host string) (*url.URL, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
- return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ return nil, fmt.Errorf("unable to parse docker host `%s`", host)
}
var basePath string
@@ -289,16 +283,19 @@ func ParseHost(host string) (string, string, string, error) {
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
- return "", "", "", err
+ return nil, err
}
addr = parsed.Host
basePath = parsed.Path
}
- return proto, addr, basePath, nil
+ return &url.URL{
+ Scheme: proto,
+ Host: addr,
+ Path: basePath,
+ }, nil
}
-// CustomHTTPHeaders returns the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
m := make(map[string]string)
for k, v := range cli.customHTTPHeaders {
@@ -307,8 +304,7 @@ func (cli *Client) CustomHTTPHeaders() map[string]string {
return m
}
-// SetCustomHTTPHeaders updates the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// SetCustomHTTPHeaders sets the custom http headers that will be sent on every HTTP request made by the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
cli.customHTTPHeaders = headers
}
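
A minimal sketch (not part of this commit) of the reworked host parsing and version negotiation; only identifiers from this hunk are used, and the addresses are illustrative:

    package main

    import (
        "fmt"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        // ParseHostURL replaces the deprecated ParseHost and returns one URL
        // value instead of three strings.
        u, err := client.ParseHostURL("tcp://127.0.0.1:2376/custom/base")
        if err != nil {
            panic(err)
        }
        fmt.Println(u.Scheme, u.Host, u.Path) // tcp 127.0.0.1:2376 /custom/base

        cli, err := client.NewClient("unix:///var/run/docker.sock", "", nil, nil)
        if err != nil {
            panic(err)
        }
        // Downgrades to the daemon's API version when the daemon is older;
        // ping errors are silently ignored.
        cli.NegotiateAPIVersion(context.Background())
        fmt.Println(cli.ClientVersion(), cli.DaemonHost())
    }
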
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
index ebb6d636c..b44d6fdd7 100644
--- a/vendor/github.com/docker/docker/client/config_inspect.go
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -17,10 +16,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
}
resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return swarm.Config{}, nil, configNotFoundError{id}
- }
- return swarm.Config{}, nil, err
+ return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
index 8483ca14d..57febc9ff 100644
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -18,7 +18,7 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
index 726b5c853..e025d44f7 100644
--- a/vendor/github.com/docker/docker/client/config_remove.go
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
}
resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "config", id)
}
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
index 531d796ee..b3b16abfd 100644
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
for _, change := range options.Changes {
query.Add("changes", change)
}
- if options.Pause != true {
+ if !options.Pause {
query.Set("pause", "0")
}
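
A hedged usage sketch: the types.ContainerCommitOptions field names and the ID field on the response are assumed from this docker/docker vintage rather than shown in the hunk, and the container name is illustrative:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // Commit without pausing; with the fix above, "pause=0" is sent
        // because !options.Pause is now the test.
        resp, err := cli.ContainerCommit(context.Background(), "mycontainer",
            types.ContainerCommitOptions{
                Reference: "example.com/myimage:latest",
                Changes:   []string{`CMD ["/bin/sh"]`},
                Pause:     false,
            })
        if err != nil {
            panic(err)
        }
        fmt.Println(resp.ID)
    }
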
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
index 6841b0b28..bd817e7fd 100644
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -45,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
if err != nil {
if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
- return response, imageNotFoundError{config.Image}
+ return response, objectNotFoundError{object: "image", id: config.Image}
}
return response, err
}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
index 17f180974..a15db14be 100644
--- a/vendor/github.com/docker/docker/client/container_inspect.go
+++ b/vendor/github.com/docker/docker/client/container_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -15,10 +14,7 @@ import (
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, err
+ return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
}
var response types.ContainerJSON
@@ -35,10 +31,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, nil, err
+ return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
}
defer ensureReaderClosed(serverResp)
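
With wrapResponseError in place, a 404 from inspect surfaces as a typed NotFound error that callers can test for. A minimal sketch (not part of this commit); the container name is illustrative:

    package main

    import (
        "fmt"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        _, err = cli.ContainerInspect(context.Background(), "no-such-container")
        if client.IsErrNotFound(err) {
            // Replaces the removed IsErrContainerNotFound helper.
            fmt.Println("container does not exist:", err)
        }
    }
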
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
index 3a79590ce..070108bf3 100644
--- a/vendor/github.com/docker/docker/client/container_remove.go
+++ b/vendor/github.com/docker/docker/client/container_remove.go
@@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "container", containerID)
}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
index fc7df9f1e..e41b728d3 100644
--- a/vendor/github.com/docker/docker/client/errors.go
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -3,6 +3,8 @@ package client
import (
"fmt"
+ "net/http"
+
"github.com/docker/docker/api/types/versions"
"github.com/pkg/errors"
)
@@ -36,95 +38,37 @@ type notFound interface {
NotFound() bool // Is the error a NotFound error
}
-// IsErrNotFound returns true if the error is caused with an
-// object (image, container, network, volume, …) is not found in the docker host.
+// IsErrNotFound returns true if the error is a NotFound error, which is returned
+// by the API when some object is not found.
func IsErrNotFound(err error) bool {
te, ok := err.(notFound)
return ok && te.NotFound()
}
-// imageNotFoundError implements an error returned when an image is not in the docker host.
-type imageNotFoundError struct {
- imageID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e imageNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of an imageNotFoundError
-func (e imageNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such image: %s", e.imageID)
-}
-
-// IsErrImageNotFound returns true if the error is caused
-// when an image is not found in the docker host.
-func IsErrImageNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// containerNotFoundError implements an error returned when a container is not in the docker host.
-type containerNotFoundError struct {
- containerID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e containerNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a containerNotFoundError
-func (e containerNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such container: %s", e.containerID)
-}
-
-// IsErrContainerNotFound returns true if the error is caused
-// when a container is not found in the docker host.
-func IsErrContainerNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// networkNotFoundError implements an error returned when a network is not in the docker host.
-type networkNotFoundError struct {
- networkID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e networkNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a networkNotFoundError
-func (e networkNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such network: %s", e.networkID)
+type objectNotFoundError struct {
+ object string
+ id string
}
-// IsErrNetworkNotFound returns true if the error is caused
-// when a network is not found in the docker host.
-func IsErrNetworkNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// volumeNotFoundError implements an error returned when a volume is not in the docker host.
-type volumeNotFoundError struct {
- volumeID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e volumeNotFoundError) NotFound() bool {
+func (e objectNotFoundError) NotFound() bool {
return true
}
-// Error returns a string representation of a volumeNotFoundError
-func (e volumeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+func (e objectNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
}
-// IsErrVolumeNotFound returns true if the error is caused
-// when a volume is not found in the docker host.
-func IsErrVolumeNotFound(err error) bool {
- return IsErrNotFound(err)
+func wrapResponseError(err error, resp serverResponse, object, id string) error {
+ switch {
+ case err == nil:
+ return nil
+ case resp.statusCode == http.StatusNotFound:
+ return objectNotFoundError{object: object, id: id}
+ case resp.statusCode == http.StatusNotImplemented:
+ return notImplementedError{message: err.Error()}
+ default:
+ return err
+ }
}
// unauthorizedError represents an authorization error in a remote registry.
@@ -144,72 +88,6 @@ func IsErrUnauthorized(err error) bool {
return ok
}
-// nodeNotFoundError implements an error returned when a node is not found.
-type nodeNotFoundError struct {
- nodeID string
-}
-
-// Error returns a string representation of a nodeNotFoundError
-func (e nodeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such node: %s", e.nodeID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e nodeNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrNodeNotFound returns true if the error is caused
-// when a node is not found.
-func IsErrNodeNotFound(err error) bool {
- _, ok := err.(nodeNotFoundError)
- return ok
-}
-
-// serviceNotFoundError implements an error returned when a service is not found.
-type serviceNotFoundError struct {
- serviceID string
-}
-
-// Error returns a string representation of a serviceNotFoundError
-func (e serviceNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such service: %s", e.serviceID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e serviceNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrServiceNotFound returns true if the error is caused
-// when a service is not found.
-func IsErrServiceNotFound(err error) bool {
- _, ok := err.(serviceNotFoundError)
- return ok
-}
-
-// taskNotFoundError implements an error returned when a task is not found.
-type taskNotFoundError struct {
- taskID string
-}
-
-// Error returns a string representation of a taskNotFoundError
-func (e taskNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such task: %s", e.taskID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e taskNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrTaskNotFound returns true if the error is caused
-// when a task is not found.
-func IsErrTaskNotFound(err error) bool {
- _, ok := err.(taskNotFoundError)
- return ok
-}
-
type pluginPermissionDenied struct {
name string
}
@@ -225,76 +103,31 @@ func IsErrPluginPermissionDenied(err error) bool {
return ok
}
-// NewVersionError returns an error if the APIVersion required
-// if less than the current supported version
-func (cli *Client) NewVersionError(APIrequired, feature string) error {
- if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
- return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
- }
- return nil
-}
-
-// secretNotFoundError implements an error returned when a secret is not found.
-type secretNotFoundError struct {
- name string
-}
-
-// Error returns a string representation of a secretNotFoundError
-func (e secretNotFoundError) Error() string {
- return fmt.Sprintf("Error: no such secret: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e secretNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrSecretNotFound returns true if the error is caused
-// when a secret is not found.
-func IsErrSecretNotFound(err error) bool {
- _, ok := err.(secretNotFoundError)
- return ok
-}
-
-// configNotFoundError implements an error returned when a config is not found.
-type configNotFoundError struct {
- name string
-}
-
-// Error returns a string representation of a configNotFoundError
-func (e configNotFoundError) Error() string {
- return fmt.Sprintf("Error: no such config: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e configNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrConfigNotFound returns true if the error is caused
-// when a config is not found.
-func IsErrConfigNotFound(err error) bool {
- _, ok := err.(configNotFoundError)
- return ok
+type notImplementedError struct {
+ message string
}
-// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
-type pluginNotFoundError struct {
- name string
+func (e notImplementedError) Error() string {
+ return e.message
}
-// NotFound indicates that this error type is of NotFound
-func (e pluginNotFoundError) NotFound() bool {
+func (e notImplementedError) NotImplemented() bool {
return true
}
-// Error returns a string representation of a pluginNotFoundError
-func (e pluginNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such plugin: %s", e.name)
+// IsErrNotImplemented returns true if the error is a NotImplemented error.
+// This is returned by the API when a requested feature has not been
+// implemented.
+func IsErrNotImplemented(err error) bool {
+ te, ok := err.(notImplementedError)
+ return ok && te.NotImplemented()
}
-// IsErrPluginNotFound returns true if the error is caused
-// when a plugin is not found in the docker host.
-func IsErrPluginNotFound(err error) bool {
- return IsErrNotFound(err)
+// NewVersionError returns an error if the APIVersion required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
}
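The consolidation above keeps the `NotFound() bool` method that `IsErrNotFound` checks, so callers detect missing objects the same way for every object kind. A minimal caller-side sketch, assuming the vendored github.com/docker/docker/client package and a reachable daemon (the image name is illustrative):

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        _, _, err = cli.ImageInspectWithRaw(context.Background(), "no-such-image")
        // IsErrNotFound matches the NotFound() method on objectNotFoundError, so
        // images, networks, volumes, plugins, etc. all report "not found" through
        // the same check; IsErrNotImplemented works analogously for
        // notImplementedError.
        if client.IsErrNotFound(err) {
            fmt.Println(err) // Error: No such image: no-such-image
        }
    }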
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
index 8cf0119f3..d04cebdcf 100644
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -12,7 +12,6 @@ import (
"time"
"github.com/docker/docker/api/types"
- "github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"golang.org/x/net/context"
@@ -71,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
- deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
@@ -115,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
- config = tlsconfig.Clone(config)
+ config = tlsConfigClone(config)
config.ServerName = hostname
}
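The `time.Until` change is a behavior-preserving cleanup: `time.Until(t)` (added in Go 1.8) is defined as `t.Sub(time.Now())`. A tiny sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        deadline := time.Now().Add(5 * time.Second)
        // Identical values, computed two ways:
        fmt.Println(deadline.Sub(time.Now())) // ~5s
        fmt.Println(time.Until(deadline))     // ~5s, the idiomatic spelling
    }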
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
index b3a64ce2f..1bc591990 100644
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ImageInspect{}, nil, imageNotFoundError{imageID}
- }
- return types.ImageInspect{}, nil, err
+ return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
index 6921209ee..81d6c5438 100644
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -19,12 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type
query.Set("noprune", "1")
}
+ var dels []types.ImageDeleteResponseItem
resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
if err != nil {
- return nil, err
+ return dels, wrapResponseError(err, resp, "image", imageID)
}
- var dels []types.ImageDeleteResponseItem
err = json.NewDecoder(resp.body).Decode(&dels)
ensureReaderClosed(resp)
return dels, err
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
index b0fcd5c23..5566e9255 100644
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I
query.Set("limit", fmt.Sprintf("%d", options.Limit))
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return results, err
}
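`filters.ToJSON` is the renamed successor of `filters.ToParam`; both serialize a `filters.Args` into the JSON payload carried by the `filters` query parameter. A minimal sketch, assuming the vendored package:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
    )

    func main() {
        args := filters.NewArgs()
        args.Add("is-official", "true")
        args.Add("stars", "3")
        // ToJSON produces what lands in the "filters" query parameter
        // of endpoints such as /images/search.
        j, err := filters.ToJSON(args)
        if err != nil {
            panic(err)
        }
        fmt.Println(j) // e.g. {"is-official":{"true":true},"stars":{"3":true}}
    }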
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
index 848c9799f..afabe6597 100644
--- a/vendor/github.com/docker/docker/client/network_inspect.go
+++ b/vendor/github.com/docker/docker/client/network_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -33,10 +32,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
}
resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return networkResource, nil, networkNotFoundError{networkID}
- }
- return networkResource, nil, err
+ return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
index 6bd674892..0811b5b51 100644
--- a/vendor/github.com/docker/docker/client/network_remove.go
+++ b/vendor/github.com/docker/docker/client/network_remove.go
@@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "network", networkID)
}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
index abf505d29..791d2c006 100644
--- a/vendor/github.com/docker/docker/client/node_inspect.go
+++ b/vendor/github.com/docker/docker/client/node_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Node{}, nil, nodeNotFoundError{nodeID}
- }
- return swarm.Node{}, nil, err
+ return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
index 3e8440f08..fed22992c 100644
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions)
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
index 0a77f3d57..adbf52feb 100644
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.
resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "node", nodeID)
}
diff --git a/vendor/github.com/docker/docker/client/parse_logs.go b/vendor/github.com/docker/docker/client/parse_logs.go
deleted file mode 100644
index e427f80a7..000000000
--- a/vendor/github.com/docker/docker/client/parse_logs.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package client
-
-// parse_logs.go contains utility helpers for getting information out of docker
-// log lines. really, it only contains ParseDetails right now. maybe in the
-// future there will be some desire to parse log messages back into a struct?
-// that would go here if we did
-
-import (
- "net/url"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-// ParseLogDetails takes a details string of key value pairs in the form
-// "k=v,l=w", where the keys and values are url query escaped, and each pair
-// is separated by a comma, returns a map. returns an error if the details
-// string is not in a valid format
-// the exact form of details encoding is implemented in
-// api/server/httputils/write_log_stream.go
-func ParseLogDetails(details string) (map[string]string, error) {
- pairs := strings.Split(details, ",")
- detailsMap := make(map[string]string, len(pairs))
- for _, pair := range pairs {
- p := strings.SplitN(pair, "=", 2)
- // if there is no equals sign, we will only get 1 part back
- if len(p) != 2 {
- return nil, errors.New("invalid details format")
- }
- k, err := url.QueryUnescape(p[0])
- if err != nil {
- return nil, err
- }
- v, err := url.QueryUnescape(p[1])
- if err != nil {
- return nil, err
- }
- detailsMap[k] = v
- }
- return detailsMap, nil
-}
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
index a4c2e2c4d..0b6e450da 100644
--- a/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -1,6 +1,8 @@
package client
import (
+ "path"
+
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
@@ -8,7 +10,7 @@ import (
// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers
func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
var ping types.Ping
- req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil)
+ req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
if err != nil {
return ping, err
}
@@ -26,7 +28,5 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
}
ping.OSType = serverResp.header.Get("OSType")
}
-
- err = cli.checkResponseErr(serverResp)
- return ping, err
+ return ping, cli.checkResponseErr(serverResp)
}
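`path.Join` cleans the joined path, so a base path that is empty or ends in `/` can no longer produce a malformed request path such as `//_ping`. A small illustration:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        // Plain concatenation can double the slash; path.Join cleans it.
        fmt.Println("/" + "/_ping")                // //_ping
        fmt.Println(path.Join("/", "/_ping"))      // /_ping
        fmt.Println(path.Join("/v1.30", "/_ping")) // /v1.30/_ping
    }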
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
index 89f39ee2c..6a6fc18df 100644
--- a/vendor/github.com/docker/docker/client/plugin_inspect.go
+++ b/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -14,10 +13,7 @@ import (
func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return nil, nil, pluginNotFoundError{name}
- }
- return nil, nil, err
+ return nil, nil, wrapResponseError(err, resp, "plugin", name)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
index 3acde3b96..78dbeb8be 100644
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -23,7 +23,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
}
resp, err := cli.get(ctx, "/plugins", query, nil)
if err != nil {
- return plugins, err
+ return plugins, wrapResponseError(err, resp, "plugin", "")
}
err = json.NewDecoder(resp.body).Decode(&plugins)
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
index b017e4d34..b498c4820 100644
--- a/vendor/github.com/docker/docker/client/plugin_remove.go
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types.
resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "plugin", name)
}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index 3e7d43fea..615d0b989 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -203,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
return err
}
if len(body) == 0 {
- return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+ return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
}
var ct string
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
index 9b602972b..6927ea96f 100644
--- a/vendor/github.com/docker/docker/client/secret_inspect.go
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@@ -17,10 +16,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
}
resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return swarm.Secret{}, nil, secretNotFoundError{id}
- }
- return swarm.Secret{}, nil, err
+ return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
}
defer ensureReaderClosed(resp)
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
index 0d33ecfbc..fdee6e2e0 100644
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -18,7 +18,7 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
index c5e37af17..9b4ee71e2 100644
--- a/vendor/github.com/docker/docker/client/secret_remove.go
+++ b/vendor/github.com/docker/docker/client/secret_remove.go
@@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error {
}
resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "secret", id)
}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index a36839443..834709d1f 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -3,11 +3,12 @@ package client
import (
"encoding/json"
"fmt"
+ "strings"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@@ -85,21 +86,30 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
return response, err
}
-func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
- imageWithDigest := image
var platforms []swarm.Platform
if err != nil {
return "", nil, err
}
- imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+ imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
if len(distributionInspect.Platforms) > 0 {
platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
for _, p := range distributionInspect.Platforms {
+ // clear architecture field for arm. This is a temporary patch to address
+ // https://github.com/docker/swarmkit/issues/2294. The issue is that while
+ // image manifests report "arm" as the architecture, the node reports
+ // something like "armv7l" (includes the variant), which causes arm images
+ // to stop working with swarm mode. This patch removes the architecture
+ // constraint for arm images to ensure tasks get scheduled.
+ arch := p.Architecture
+ if strings.ToLower(arch) == "arm" {
+ arch = ""
+ }
platforms = append(platforms, swarm.Platform{
- Architecture: p.Architecture,
+ Architecture: arch,
OS: p.OS,
})
}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
index d7e051e3a..3e9699e5e 100644
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
- "net/http"
"net/url"
"github.com/docker/docker/api/types"
@@ -19,10 +18,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Service{}, nil, serviceNotFoundError{serviceID}
- }
- return swarm.Service{}, nil, err
+ return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
index c29e6d407..eb3ff9739 100644
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
index a9331f92c..ad992c01d 100644
--- a/vendor/github.com/docker/docker/client/service_remove.go
+++ b/vendor/github.com/docker/docker/client/service_remove.go
@@ -6,5 +6,5 @@ import "golang.org/x/net/context"
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "service", serviceID)
}
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
index bc8058fc3..dc08cedb9 100644
--- a/vendor/github.com/docker/docker/client/task_inspect.go
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -4,10 +4,8 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
"github.com/docker/docker/api/types/swarm"
-
"golang.org/x/net/context"
)
@@ -15,10 +13,7 @@ import (
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Task{}, nil, taskNotFoundError{taskID}
- }
- return swarm.Task{}, nil, err
+ return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
}
defer ensureReaderClosed(serverResp)
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
index 66324da95..01bd69525 100644
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions)
query := url.Values{}
if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
+ filterJSON, err := filters.ToJSON(options.Filters)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go
new file mode 100644
index 000000000..99b6be1ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package client
+
+import "crypto/tls"
+
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
+// compatibility for go1.7 that doesn't include this method in stdlib.
+func tlsConfigClone(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go
index 0d5b448fe..b837b2ade 100644
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
+++ b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go
@@ -1,12 +1,12 @@
// +build go1.7,!go1.8
-package tlsconfig
+package client
import "crypto/tls"
-// Clone returns a clone of tls.Config. This function is provided for
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
+func tlsConfigClone(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
index 401ab15d3..73f6ef7b4 100644
--- a/vendor/github.com/docker/docker/client/transport.go
+++ b/vendor/github.com/docker/docker/client/transport.go
@@ -5,14 +5,6 @@ import (
"net/http"
)
-// transportFunc allows us to inject a mock transport for testing. We define it
-// here so we can detect the tlsconfig and return nil for only this type.
-type transportFunc func(*http.Request) (*http.Response, error)
-
-func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
- return tf(req)
-}
-
// resolveTLSConfig attempts to resolve the TLS configuration from the
// RoundTripper.
func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
index f3d8877df..137705065 100644
--- a/vendor/github.com/docker/docker/client/utils.go
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -24,7 +24,7 @@ func getDockerOS(serverHeader string) string {
func getFiltersQuery(f filters.Args) (url.Values, error) {
query := url.Values{}
if f.Len() > 0 {
- filterJSON, err := filters.ToParam(f)
+ filterJSON, err := filters.ToJSON(f)
if err != nil {
return query, err
}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
index 3860e9b22..988934384 100644
--- a/vendor/github.com/docker/docker/client/volume_inspect.go
+++ b/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
- "net/http"
+ "path"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@@ -18,13 +18,17 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+ // The empty ID needs to be handled here because with an empty ID the
+	// request URL will not contain a trailing /, which calls the volume list API
+ // instead of volume inspect
+ if volumeID == "" {
+ return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
+ }
+
var volume types.Volume
- resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil)
if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return volume, nil, volumeNotFoundError{volumeID}
- }
- return volume, nil, err
+ return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
}
defer ensureReaderClosed(resp)
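The explicit empty-ID guard is needed precisely because of the switch to `path.Join`: joining with an empty ID cleans away the trailing slash, and the request would hit the volume *list* endpoint instead of inspect. Illustration:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        fmt.Println("/volumes/" + "")             // /volumes/  (old concatenation)
        fmt.Println(path.Join("/volumes", ""))    // /volumes   (would hit the list API)
        fmt.Println(path.Join("/volumes", "abc")) // /volumes/abc
    }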
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
index 6c26575b4..3ffb8bcf2 100644
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool
}
resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
ensureReaderClosed(resp)
- return err
+ return wrapResponseError(err, resp, "volume", volumeID)
}
diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md
new file mode 100644
index 000000000..9e588db25
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/README.md
@@ -0,0 +1,60 @@
+## About
+
+This directory contains a collection of scripts used to build and manage this
+repository. If there are any issues regarding the intention of a particular
+script (or even part of a certain script), please reach out to us.
+It may help us either refine our current scripts, or add new ones
+that are appropriate for a given use case.
+
+## DinD (dind.sh)
+
+DinD is a wrapper script which allows Docker to be run inside a Docker
+container. DinD requires the container to
+be run with privileged mode enabled.
+
+## Generate Authors (generate-authors.sh)
+
+Generates AUTHORS, a file with all the names and corresponding emails of
+individual contributors. AUTHORS can be found in the home directory of
+this repository.
+
+## Make
+
+There are two make files, each with a different extension. Neither is supposed
+to be called directly; only invoke `make`. Both scripts run inside a Docker
+container.
+
+### make.ps1
+
+- The Windows native build script that uses PowerShell semantics; it is more
+limited than `hack\make.sh`, since it does not support the full set of
+operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
+does support local Windows development and Windows-to-Windows CI.
+More information can be found within `make.ps1`, written by its author, @jhowardmsft
+
+### make.sh
+
+- Referenced via `make test` when running tests on a local machine,
+or directly referenced when running tests inside a Docker development container.
+- When running on a local machine, run `make test` to run all tests found in
+`test`, `test-unit`, `test-integration`, and `test-docker-py`.
+The default timeout is set in `make.sh` to 60 minutes
+(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
+all of the tests.
+- When running inside a Docker development container, `hack/make.sh` does
+not have a single target that runs all the tests. You need to provide a
+single command line with multiple targets that performs the same thing.
+An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
+- For more information related to testing outside the scope of this README,
+refer to
+[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
+
+## Release (release.sh)
+
+Releases any bundles built by `make` to a public AWS S3 bucket.
+For information regarding configuration, please view `release.sh`.
+
+## Vendor (vendor.sh)
+
+A shell script that is a wrapper around Vndr. For information on how to use
+this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 000000000..1cea52526
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,69 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster
+
+## Architecture
+
+### Master service
+
+ - Works as a funker caller
+ - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+ - Works as a funker callee
+ - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+ - Controls master and workers via `docker stack`
+ - No need to have a local daemon
+
+Typically, the master and workers are expected to run in a cloud environment,
+while the client runs on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirements
+
+ - Docker daemon 1.13 or later
+ - Private registry for distributed execution with multiple nodes
+
+## Usage
+
+### Step 1: Prepare images
+
+ $ make build-integration-cli-on-swarm
+
+The following environment variables are known to work in this step:
+
+ - `BUILDFLAGS`
+ - `DOCKER_INCREMENTAL_BINARY`
+
+Note: during the transition into the Moby Project, you might need to create a symbolic link from `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
+
+### Step 2: Execute tests
+
+ $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+The following environment variables are known to work in this step:
+
+ - `DOCKER_GRAPHDRIVER`
+ - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+ - `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+ - `-shuffle`: Shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+ - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
+ - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
+ - `-dry-run`: skip the actual workload
+ - `-keep-executor`: do not auto-remove executor containers; this is used for running privileged programs on Swarm
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
new file mode 100644
index 000000000..efd6d6d04
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
@@ -0,0 +1,2 @@
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
new file mode 100644
index 000000000..4fbd470bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/env.go
@@ -0,0 +1,48 @@
+package opts
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it returns the current value using os.Getenv.
+//
+// As with ParseEnvFile and related to #16585, environment variable names
+// are not validated whatsoever; it's up to the application inside docker
+// to validate them or not.
+//
+// The only validation here is to check if name is empty, per #25099
+func ValidateEnv(val string) (string, error) {
+ arr := strings.Split(val, "=")
+ if arr[0] == "" {
+ return "", errors.Errorf("invalid environment variable: %s", val)
+ }
+ if len(arr) > 1 {
+ return val, nil
+ }
+ if !doesEnvExist(val) {
+ return val, nil
+ }
+ return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func doesEnvExist(name string) bool {
+ for _, entry := range os.Environ() {
+ parts := strings.SplitN(entry, "=", 2)
+ if runtime.GOOS == "windows" {
+ // Environment variables are case-insensitive on Windows: PaTh, path, and PATH are equivalent.
+ if strings.EqualFold(parts[0], name) {
+ return true
+ }
+ }
+ if parts[0] == name {
+ return true
+ }
+ }
+ return false
+}
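A minimal sketch of the three behaviors of `ValidateEnv`, assuming the vendored import path:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/opts"
    )

    func main() {
        os.Setenv("FOO", "bar")

        v, _ := opts.ValidateEnv("FOO=baz") // explicit value: returned unchanged
        fmt.Println(v)                      // FOO=baz

        v, _ = opts.ValidateEnv("FOO") // bare name that is set: resolved via os.Getenv
        fmt.Println(v)                 // FOO=bar

        v, _ = opts.ValidateEnv("MISSING") // bare name that is unset: passed through
        fmt.Println(v)                     // MISSING

        _, err := opts.ValidateEnv("=oops") // empty name: rejected
        fmt.Println(err)                    // invalid environment variable: =oops
    }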
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 000000000..594cccf2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,165 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+ // DefaultTCPHost constant defines the default host string used by docker on Windows
+ DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+	// Note: unlike most flag validators, we don't return the mutated value here;
+	// we need to know what the user entered later (using ParseHost) to adjust for TLS
+ return val, nil
+}
+
+// ParseHost parses a daemon host string and sets defaults
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
+
+// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
+// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
+func ValidateExtraHost(val string) (string, error) {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return "", fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := ValidateIPAddress(arr[1]); err != nil {
+ return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return val, nil
+}
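A few illustrative inputs for `ParseHost`, assuming the non-Windows defaults defined above (`DefaultHTTPHost` = "localhost", `DefaultUnixSocket` = "/var/run/docker.sock"):

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        h, _ := opts.ParseHost(false, "")
        fmt.Println(h) // unix:///var/run/docker.sock
        h, _ = opts.ParseHost(true, "")
        fmt.Println(h) // tcp://localhost:2376
        h, _ = opts.ParseHost(false, "tcp://:2375")
        fmt.Println(h) // tcp://localhost:2375 (missing host filled from the default)
        h, _ = opts.ParseHost(false, "10.0.0.1")
        fmt.Println(h) // tcp://10.0.0.1:2375 (bare addresses default to tcp)
    }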
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 000000000..611407a9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+// DefaultHost constant defines the default host string used by docker on other hosts than Windows
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 000000000..7c239e00f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 000000000..109506397
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,47 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP, it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parsable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If the stored IP is a
+// nil pointer, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
+
+// Type returns the type of the option
+func (o *IPOpt) Type() string {
+ return "ip"
+}
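A short sketch of `IPOpt` as a CLI flag value, assuming the vendored import path:

    package main

    import (
        "fmt"
        "net"

        "github.com/docker/docker/opts"
    )

    func main() {
        var ip net.IP
        // The default is applied immediately; Set overwrites it later.
        o := opts.NewIPOpt(&ip, "127.0.0.1")
        fmt.Println(o.String()) // 127.0.0.1

        if err := o.Set("::1"); err == nil {
            fmt.Println(ip) // ::1
        }
        fmt.Println(o.Set("not-an-ip")) // not-an-ip is not an ip address
    }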
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 000000000..a86d74d60
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,327 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "path"
+ "regexp"
+ "strings"
+
+ units "github.com/docker/go-units"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ if len(*opts.values) == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%v", *opts.values)
+}
+
+// Set validates the input value if needed and adds it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+ return "list"
+}
+
+// WithValidator returns the ListOpts with validator set.
+func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
+ opts.validator = validator
+ return opts
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value if needed and adds it to the
+// internal map, by splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+ return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ParseLink parses and validates the specified string as a link format (name:alias)
+func ParseLink(val string) (string, string, error) {
+ if val == "" {
+ return "", "", fmt.Errorf("empty string specified for links")
+ }
+ arr := strings.Split(val, ":")
+ if len(arr) > 2 {
+ return "", "", fmt.Errorf("bad format for links: %s", val)
+ }
+ if len(arr) == 1 {
+ return val, val, nil
+ }
+ // This is kept because we can actually get a HostConfig with links
+ // from an already created container and the format is not `foo:bar`
+ // but `/foo:/c1/bar`
+ if strings.HasPrefix(arr[0], "/") {
+ _, alias := path.Split(arr[1])
+ return arr[0][1:], alias, nil
+ }
+ return arr[0], arr[1], nil
+}
+
+// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
+type MemBytes int64
+
+// String returns the string format of the human readable memory bytes
+func (m *MemBytes) String() string {
+ // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
+ // We return "0" in case value is 0 here so that the default value is hidden.
+ // (Sometimes "default 0 B" is actually misleading)
+ if m.Value() != 0 {
+ return units.BytesSize(float64(m.Value()))
+ }
+ return "0"
+}
+
+// Set sets the value of the MemBytes by passing a string
+func (m *MemBytes) Set(value string) error {
+ val, err := units.RAMInBytes(value)
+ *m = MemBytes(val)
+ return err
+}
+
+// Type returns the type
+func (m *MemBytes) Type() string {
+ return "bytes"
+}
+
+// Value returns the value in int64
+func (m *MemBytes) Value() int64 {
+ return int64(*m)
+}
+
+// UnmarshalJSON is the customized unmarshaler for MemBytes
+func (m *MemBytes) UnmarshalJSON(s []byte) error {
+ if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return fmt.Errorf("invalid size: %q", s)
+ }
+ val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
+ *m = MemBytes(val)
+ return err
+}
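A compact sketch of `ListOpts` and `MemBytes`, the two option types most commonly wired into CLI flags, assuming the vendored import path:

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        // ListOpts backs repeatable flags, validating each value as it is set.
        labels := opts.NewListOpts(opts.ValidateLabel)
        _ = labels.Set("env=prod")
        fmt.Println(labels.GetAll())    // [env=prod]
        fmt.Println(labels.Set("oops")) // bad attribute format: oops

        // MemBytes parses human-readable sizes via go-units.
        var m opts.MemBytes
        _ = m.Set("128m")
        fmt.Println(m.Value(), m.String()) // 134217728 128MiB
    }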
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 000000000..2766a43a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 000000000..98b7251a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found was that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost is the default HTTP host used if only a port is provided to the -H flag, e.g. dockerd -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go
new file mode 100644
index 000000000..09c68a526
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/quotedstring.go
@@ -0,0 +1,37 @@
+package opts
+
+// QuotedString is a string that may have extra quotes around the value. The
+// quotes are stripped from the value.
+type QuotedString struct {
+ value *string
+}
+
+// Set sets a new value
+func (s *QuotedString) Set(val string) error {
+ *s.value = trimQuotes(val)
+ return nil
+}
+
+// Type returns the type of the value
+func (s *QuotedString) Type() string {
+ return "string"
+}
+
+func (s *QuotedString) String() string {
+ return *s.value
+}
+
+func trimQuotes(value string) string {
+ lastIndex := len(value) - 1
+ for _, char := range []byte{'\'', '"'} {
+ if value[0] == char && value[lastIndex] == char {
+ return value[1:lastIndex]
+ }
+ }
+ return value
+}
+
+// NewQuotedString returns a new quoted string option
+func NewQuotedString(value *string) *QuotedString {
+ return &QuotedString{value: value}
+}
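
A small usage sketch (hypothetical values): Set strips one matching pair of outer single or double quotes and writes the result through the wrapped pointer:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var val string
	qs := opts.NewQuotedString(&val)
	_ = qs.Set(`"hello"`) // matching outer quotes are stripped
	fmt.Println(val)      // hello
	_ = qs.Set(`'a'b'`)   // only the outermost pair is removed
	fmt.Println(val)      // a'b
}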
diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go
new file mode 100644
index 000000000..4361b3ce0
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/runtime.go
@@ -0,0 +1,79 @@
+package opts
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+)
+
+// RuntimeOpt defines a map of Runtimes
+type RuntimeOpt struct {
+ name string
+ stockRuntimeName string
+ values *map[string]types.Runtime
+}
+
+// NewNamedRuntimeOpt creates a new RuntimeOpt
+func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
+ if ref == nil {
+ ref = &map[string]types.Runtime{}
+ }
+ return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *RuntimeOpt) Name() string {
+ return o.name
+}
+
+// Set validates and updates the list of Runtimes
+func (o *RuntimeOpt) Set(val string) error {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.TrimSpace(parts[0])
+ parts[1] = strings.TrimSpace(parts[1])
+ if parts[0] == "" || parts[1] == "" {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.ToLower(parts[0])
+ if parts[0] == o.stockRuntimeName {
+ return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
+ }
+
+ if _, ok := (*o.values)[parts[0]]; ok {
+ return fmt.Errorf("runtime '%s' was already defined", parts[0])
+ }
+
+ (*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
+
+ return nil
+}
+
+// String returns Runtime values as a string.
+func (o *RuntimeOpt) String() string {
+ var out []string
+ for k := range *o.values {
+ out = append(out, k)
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetMap returns a map of Runtimes (name: path)
+func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
+ if o.values != nil {
+ return *o.values
+ }
+
+ return map[string]types.Runtime{}
+}
+
+// Type returns the type of the option
+func (o *RuntimeOpt) Type() string {
+ return "runtime"
+}
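
A usage sketch with a hypothetical runtime name and path; Set lowercases the name and rejects both the stock runtime name and duplicates:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/opts"
)

func main() {
	runtimes := map[string]types.Runtime{}
	opt := opts.NewNamedRuntimeOpt("runtimes", &runtimes, "runc")
	if err := opt.Set("Kata=/usr/bin/kata-runtime"); err != nil { // hypothetical runtime
		panic(err)
	}
	fmt.Println(opt.GetMap()["kata"].Path)     // /usr/bin/kata-runtime (name lowercased)
	fmt.Println(opt.Set("runc=/usr/bin/runc")) // error: the stock runtime name is reserved
}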
diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go
new file mode 100644
index 000000000..a2a65fcd2
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ulimit.go
@@ -0,0 +1,81 @@
+package opts
+
+import (
+ "fmt"
+
+ "github.com/docker/go-units"
+)
+
+// UlimitOpt defines a map of Ulimits
+type UlimitOpt struct {
+ values *map[string]*units.Ulimit
+}
+
+// NewUlimitOpt creates a new UlimitOpt
+func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &UlimitOpt{ref}
+}
+
+// Set validates a Ulimit and sets its name as a key in UlimitOpt
+func (o *UlimitOpt) Set(val string) error {
+ l, err := units.ParseUlimit(val)
+ if err != nil {
+ return err
+ }
+
+ (*o.values)[l.Name] = l
+
+ return nil
+}
+
+// String returns Ulimit values as a string.
+func (o *UlimitOpt) String() string {
+ var out []string
+ for _, v := range *o.values {
+ out = append(out, v.String())
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetList returns a slice of pointers to Ulimits.
+func (o *UlimitOpt) GetList() []*units.Ulimit {
+ var ulimits []*units.Ulimit
+ for _, v := range *o.values {
+ ulimits = append(ulimits, v)
+ }
+
+ return ulimits
+}
+
+// Type returns the option type
+func (o *UlimitOpt) Type() string {
+ return "ulimit"
+}
+
+// NamedUlimitOpt defines a named map of Ulimits
+type NamedUlimitOpt struct {
+ name string
+ UlimitOpt
+}
+
+var _ NamedOption = &NamedUlimitOpt{}
+
+// NewNamedUlimitOpt creates a new NamedUlimitOpt
+func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &NamedUlimitOpt{
+ name: name,
+ UlimitOpt: *NewUlimitOpt(ref),
+ }
+}
+
+// Name returns the option name
+func (o *NamedUlimitOpt) Name() string {
+ return o.name
+}
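
A usage sketch: Set accepts the name=soft[:hard] form understood by go-units' ParseUlimit:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	ul := opts.NewUlimitOpt(nil)
	if err := ul.Set("nofile=1024:2048"); err != nil {
		panic(err)
	}
	for _, l := range ul.GetList() {
		fmt.Println(l.Name, l.Soft, l.Hard) // nofile 1024 2048
	}
}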
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..aa5563756
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1237 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+type (
+ // Compression is the state representing whether the data is compressed or not.
+ Compression int
+ // WhiteoutFormat is the format of whiteouts unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.IDPair
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. Also, to facilitate the passing of specific ID
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMappingsVar *idtools.IDMappings
+}
+
+// NewDefaultArchiver returns a new Archiver without any IDMappings
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out.
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents uncompressed data.
+ Uncompressed Compression = iota
+ // Bzip2 is the bzip2 compression algorithm.
+ Bzip2
+ // Gzip is the gzip compression algorithm.
+ Gzip
+ // Xz is the xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, chdone, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the file's content. If the file does not exist, both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ header.Name = name
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to archive pkg this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which were removed from archive/tar in Go 1.9.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which were removed from archive/tar in Go 1.9
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IDMappings *idtools.IDMappings
+ ChownOpts *idtools.IDPair
+
+ // For packing and unpacking whiteout files in the
+ // non-standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IDMappings: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links to
+ // and that linked name should appear first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note the original
+ // hdr may have been updated to be a whiteout with returning
+ // a whiteout header
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = pm.Matches(relFilePath)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir then we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMappings.RootPair()
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMappings, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappingsVar.UIDs(),
+ GIDMaps: archiver.IDMappingsVar.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untars a file from path to a destination; src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMappingsVar.UIDs(),
+ GIDMaps: archiver.IDMappingsVar.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMappingsVar.RootPair()
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// IDMappings returns the IDMappings of the archiver.
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
+ return archiver.IDMappingsVar
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+ ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(chdone)
+ }()
+
+ return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
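
A minimal end-to-end sketch of the entry points above, with hypothetical paths; Untar detects and undoes the compression that TarWithOptions applied:

package main

import "github.com/docker/docker/pkg/archive"

func main() {
	// /some/src and /some/dst are hypothetical paths.
	rdr, err := archive.TarWithOptions("/some/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.tmp"},
	})
	if err != nil {
		panic(err)
	}
	defer rdr.Close()
	// Untar runs DecompressStream internally, so the gzip stream is handled.
	if err := archive.Untar(rdr, "/some/dst", nil); err != nil {
		panic(err)
	}
}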
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 000000000..6e950e93c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,92 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
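
For orientation, a sketch of the AUFS whiteout naming that ConvertWrite produces for a deleted file: the basename gains the package's WhiteoutPrefix (".wh."):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// /a/b/deleted-file is a hypothetical path.
	dir, name := filepath.Split("/a/b/deleted-file")
	fmt.Println(filepath.Join(dir, archive.WhiteoutPrefix+name)) // /a/b/.wh.deleted-file
}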
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 000000000..54acbf285
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 000000000..ac4a348d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,122 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/system"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform-specific processing to ensure that the
+// path being passed in is converted to a volume path format if it is not already in one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts a platform-specific filepath
+// to a canonical posix-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform the archival is done on.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently go does not fill in the major/minors
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
+ hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+ }
+ }
+
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = s.Ino
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ if rsystem.RunningInUserNS() {
+ // cannot create a device if running in user namespace
+ return nil
+ }
+
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
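
A worked example of the major/minor split above, using the classic Linux 16-bit device-number encoding (assumed sample value):

package main

import "fmt"

func main() {
	rdev := uint64(0x0801) // e.g. /dev/sda1: major 8, minor 1
	fmt.Println((rdev >> 8) & 0xfff)                      // 8 (major)
	fmt.Println((rdev & 0xff) | ((rdev >> 12) & 0xfff00)) // 1 (minor)
}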
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 000000000..a22410c03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,79 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform-specific processing to ensure that the
+// path being passed in is converted to a volume path format if it is not already in one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath converts a platform-specific filepath
+// to a canonical posix-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace however we must
+ // check just in case
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform the archival is done on.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.IDPair{UID: 0, GID: 0}, nil
+}
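
A worked example of the permission rewrite above, re-implemented inline since chmodTarEntry is unexported; 0644 comes out as 0755:

package main

import (
	"fmt"
	"os"
)

func main() {
	perm := os.FileMode(0644)
	permPart := (perm & os.ModePerm) | 0111 // everything gets +x from Windows
	permPart &= 0755
	fmt.Printf("%o\n", (perm&^os.ModePerm)|permPart) // 755
}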
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 000000000..d78fe6ac6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,441 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count and either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information for the given path in the tree.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
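+
+// For illustration (editor's note): given a tree built by
+// collectFileInfoForChanges, a path can be resolved to its node with
+//
+//	info := root.LookUp("/etc/hostname") // *FileInfo for that path, or nil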
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions.
+ // Also, we only recurse on the old dir if the new info is a directory;
+ // otherwise any previous delete/change is considered recursive.
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes compares info against oldInfo and returns the list of changes
+// between the two trees.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
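+
+// A hedged usage sketch (editor's note, not upstream code): comparing a
+// scratch directory against a base directory and printing what changed.
+//
+//	changes, err := archive.ChangesDirs("/tmp/new", "/tmp/old")
+//	if err == nil {
+//		for _, c := range changes {
+//			fmt.Println(c.String()) // e.g. "C /etc", "A /etc/hostname"
+//		}
+//	}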
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
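+
+// Illustrative pipeline (editor's note): a typical layer diff pairs
+// ChangesDirs with ExportChanges to stream a layer tar; deletions become
+// ".wh." whiteout entries as described above. Passing nil ID maps assumes
+// an identity UID/GID mapping.
+//
+//	changes, _ := archive.ChangesDirs(newDir, oldDir)
+//	rc, _ := archive.ExportChanges(newDir, changes, nil, nil)
+//	defer rc.Close()
+//	io.Copy(dst, rc) // dst is any io.Writer, e.g. an *os.File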
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..8e96d961f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,313 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save several seconds on
+// large images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does
+// not reflect their full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ if fi.Mode()&os.ModeCharDevice != 0 {
+ s := fi.Sys().(*syscall.Stat_t)
+ if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+ return path, nil
+ }
+ }
+ if fi.Mode()&os.ModeDir != 0 {
+ opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+ if err != nil {
+ return "", err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ return path, nil
+ }
+ }
+
+ return "", nil
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 000000000..da70ed37c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..7aa1226d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,37 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs; it's not a good measure of change
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..6fd353269
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+ // Don't look at size for dirs; it's not a good measure of change
+ if oldStat.Mtim() != newStat.Mtim() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 000000000..d1e036d5c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,472 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in the separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+ originalPath = strings.Replace(originalPath, "/", string(sep), -1)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath, sep) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(sep)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+ cleanedPath += string(sep)
+ }
+
+ return cleanedPath
+}
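+
+// Examples (editor's sketch of the behaviour documented above):
+//
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.", '/') // "/foo/bar/."
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/", '/')  // "/foo/bar/"
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar", '/')   // "/foo/bar"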
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string, sep byte) bool {
+ return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string, sep byte) bool {
+ return len(path) > 0 && path[len(path)-1] == sep
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
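+
+// For example (illustrative):
+//
+//	SplitPathDirEntry("/foo/bar")   // ("/foo", "bar")
+//	SplitPathDirEntry("/foo/bar/")  // ("/foo", "bar")
+//	SplitPathDirEntry("/foo/bar/.") // ("/foo/bar", ".")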
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the tarring itself, but instead just
+// creates the rebase parameters to be sent to TarWithOptions (the TarOptions struct).
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symlink;
+ // we will use the target file instead of the symlink if
+ // followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path, os.PathSeparator):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
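+
+// Sketch (editor's note, not upstream code): renaming the top-level entry of
+// a tar stream so that an archive of "src" extracts as "dst":
+//
+//	rebased := archive.RebaseArchiveEntries(srcTarStream, "src", "dst")
+//	defer rebased.Close()
+//	// entries "src/a.txt", "src/b/" now read back as "dst/a.txt", "dst/b/"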
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
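+
+// A hedged usage sketch (editor's note): CopyResource is the high-level entry
+// point for "cp"-style operations; it handles files, directories, and the
+// trailing-"/." convention in one call.
+//
+//	// Copy the contents of /tmp/src into /tmp/dst, following symlinks.
+//	if err := archive.CopyResource("/tmp/src/.", "/tmp/dst", true); err != nil {
+//		log.Fatal(err)
+//	}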
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path that needs to be copied, given
+// parameters such as whether to follow symlinks. If followLink is true,
+// resolvedPath is the link target of any symlinked file; otherwise only the
+// parent directory's symlinks are resolved, and a symlinked file itself is
+// returned without resolving.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following the symlink, resolve only the parent directory's symlinks.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or "."
+ // segment), so we can manually re-append them as needed.
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..e305b5e4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..2b775b45c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..019facd38
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,256 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: as these operations are platform specific, the slash must be too.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600, "")
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: as these operations are platform specific, the slash must be too.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMappings, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, optionally
+// skipping the DecompressStream call when decompress is false.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer, options)
+}
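+
+// Illustrative pairing (editor's note): a layer tar produced by
+// ExportChanges can be replayed onto a root filesystem with ApplyLayer;
+// whiteout entries in the stream delete the corresponding files in dest.
+//
+//	size, err := archive.ApplyLayer("/tmp/rootfs", layerTar) // layerTar is an io.Reader
+//	// size is the unpacked byte count of the layer contents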
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 000000000..3448569b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..e85aac054
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 000000000..d20478a10
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix means the file is a whiteout. If this prefix is followed by
+// a filename, that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix marks a whiteout that has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir means the directory has been made opaque: readdir calls
+// on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
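+
+// Putting the constants together (editor's summary): deleting "a/b" in a
+// layer yields the tar entry "a/.wh.b"; marking "a" opaque yields
+// "a/.wh..wh..opq"; and "a/.wh..wh.plnk/<n>" entries are AUFS hardlink
+// storage that is consumed during extraction rather than written to disk.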
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..b39d12c87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 000000000..a129e654e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,298 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/scanner"
+
+ "github.com/sirupsen/logrus"
+)
+
+// PatternMatcher allows checking paths against a list of patterns
+type PatternMatcher struct {
+ patterns []*Pattern
+ exclusions bool
+}
+
+// NewPatternMatcher creates a new matcher object for specific patterns that can
+// be used later to match paths against them.
+func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
+ pm := &PatternMatcher{
+ patterns: make([]*Pattern, 0, len(patterns)),
+ }
+ for _, p := range patterns {
+ // Eliminate leading and trailing whitespace.
+ p = strings.TrimSpace(p)
+ if p == "" {
+ continue
+ }
+ p = filepath.Clean(p)
+ newp := &Pattern{}
+ if p[0] == '!' {
+ if len(p) == 1 {
+ return nil, errors.New("illegal exclusion pattern: \"!\"")
+ }
+ newp.exclusion = true
+ p = p[1:]
+ pm.exclusions = true
+ }
+ // Do some syntax checking on the pattern.
+ // filepath's Match() has some really weird, inconsistent rules, so
+ // instead of trying to duplicate its logic, just call Match() for its
+ // error state and, if there is an error in the pattern, return it.
+ // If this becomes an issue we can remove this since it's really only
+ // needed in the error (syntax) case - which isn't really critical.
+ if _, err := filepath.Match(p, "."); err != nil {
+ return nil, err
+ }
+ newp.cleanedPattern = p
+ newp.dirs = strings.Split(p, string(os.PathSeparator))
+ pm.patterns = append(pm.patterns, newp)
+ }
+ return pm, nil
+}
+
+// Matches matches path against all the patterns. Matches is not safe to be
+// called concurrently
+func (pm *PatternMatcher) Matches(file string) (bool, error) {
+ matched := false
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+ for _, pattern := range pm.patterns {
+ negative := false
+
+ if pattern.exclusion {
+ negative = true
+ }
+
+ match, err := pattern.match(file)
+ if err != nil {
+ return false, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) {
+ match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// Exclusions returns true if any of the patterns define exclusions
+func (pm *PatternMatcher) Exclusions() bool {
+ return pm.exclusions
+}
+
+// Patterns returns array of active patterns
+func (pm *PatternMatcher) Patterns() []*Pattern {
+ return pm.patterns
+}
+
+// Pattern defines a single regexp used to filter file paths.
+type Pattern struct {
+ cleanedPattern string
+ dirs []string
+ regexp *regexp.Regexp
+ exclusion bool
+}
+
+func (p *Pattern) String() string {
+ return p.cleanedPattern
+}
+
+// Exclusion returns true if this pattern defines exclusion
+func (p *Pattern) Exclusion() bool {
+ return p.exclusion
+}
+
+func (p *Pattern) match(path string) (bool, error) {
+
+ if p.regexp == nil {
+ if err := p.compile(); err != nil {
+ return false, filepath.ErrBadPattern
+ }
+ }
+
+ b := p.regexp.MatchString(path)
+
+ return b, nil
+}
+
+func (p *Pattern) compile() error {
+ regStr := "^"
+ pattern := p.cleanedPattern
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ // Note that this allows for any # of /'s (even 0) because
+ // the .* will eat everything, even /'s
+ regStr += "(.*" + escSL + ")?"
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if ch == '.' || ch == '$' {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+ } else if ch == '\\' {
+ // escape next char. Note that a trailing \ in the pattern
+ // will be left alone (but need to escape it)
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ re, err := regexp.Compile(regStr)
+ if err != nil {
+ return err
+ }
+
+ p.regexp = re
+ return nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ pm, err := NewPatternMatcher(patterns)
+ if err != nil {
+ return false, err
+ }
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ return pm.Matches(file)
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link must be a directory, not a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
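The pattern machinery above mirrors .dockerignore semantics: "**" crosses directory boundaries, "*" and "?" stop at the path separator, and a leading "!" (stripped when the matcher is built) re-includes a previously excluded path. A minimal sketch of driving it through the package-level Matches helper; the file names and patterns are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude everything under vendor/, then re-include one file.
	patterns := []string{"vendor/**", "!vendor/keep.txt"}

	for _, f := range []string{"vendor/a/b.go", "vendor/keep.txt", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-16s excluded=%v\n", f, excluded)
	}
	// Output (on a "/"-separated platform):
	// vendor/a/b.go    excluded=true
	// vendor/keep.txt  excluded=false
	// main.go          excluded=false
}
```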
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 000000000..ccd648fac
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// executing `lsof -p PID`
+func GetTotalUsedFds() int {
+ pid := os.Getpid()
+
+ cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return -1
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+
+ fds := strings.Split(outputStr, "\n")
+
+ return len(fds) - 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 000000000..0f2cb7ab9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors.
+// On Solaris these limits are per-process and not system-wide.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 000000000..9e0e97bd6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// reading them from the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
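Each platform variant of GetTotalUsedFds keeps the same contract: a best-effort count, with -1 as the unsupported/unknown sentinel, so callers should guard on that value. A hypothetical call site:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	if n := fileutils.GetTotalUsedFds(); n >= 0 {
		fmt.Printf("process currently holds %d file descriptors\n", n)
	} else {
		fmt.Println("fd counting is not supported on this platform")
	}
}
```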
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 000000000..5ec21cace
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..6cfa46483
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,317 @@
+package jsonmessage
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ gotty "github.com/Nvveen/Gotty"
+ "github.com/docker/docker/pkg/term"
+ units "github.com/docker/go-units"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message: `Code` is an integer
+// error code and `Message` is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+ // If true, don't show xB/yB
+ HideCounts bool `json:"hidecounts,omitempty"`
+ Units string `json:"units,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = 200
+ pbBox string
+ numbersBox string
+ timeLeftBox string
+ )
+
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ width = int(ws.Width)
+ }
+
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ if p.Total <= 0 {
+ switch p.Units {
+ case "":
+ current := units.HumanSize(float64(p.Current))
+ return fmt.Sprintf("%8v", current)
+ default:
+ return fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative gh#7136
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ switch {
+ case p.HideCounts:
+ case p.Units == "": // no units, use bytes
+ current := units.HumanSize(float64(p.Current))
+ total := units.HumanSize(float64(p.Total))
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+ default:
+ numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+ }
+ }
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ left = (left / time.Second) * time.Second
+
+ if width > 50 {
+ timeLeftBox = " " + left.String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it came from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+ // Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
+type termInfo interface {
+ Parse(attr string, params ...interface{}) (string, error)
+}
+
+type noTermInfo struct{} // canary used when no terminfo.
+
+func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
+ return "", fmt.Errorf("noTermInfo")
+}
+
+func clearLine(out io.Writer, ti termInfo) {
+ // el2 (clear whole line) is not exposed by terminfo.
+
+ // First clear line from beginning to cursor
+ if attr, err := ti.Parse("el1"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[1K")
+ }
+ // Then clear line from cursor to end
+ if attr, err := ti.Parse("el"); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[K")
+ }
+}
+
+func cursorUp(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cuu", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dA", l)
+ }
+}
+
+func cursorDown(out io.Writer, ti termInfo, l int) {
+ if l == 0 { // Should never be the case, but be tolerant
+ return
+ }
+ if attr, err := ti.Parse("cud", l); err == nil {
+ fmt.Fprintf(out, "%s", attr)
+ } else {
+ fmt.Fprintf(out, "\x1b[%dB", l)
+ }
+}
+
+// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
+ if jm.Error != nil {
+ if jm.Error.Code == 401 {
+ return fmt.Errorf("authentication is required")
+ }
+ return jm.Error
+ }
+ var endl string
+ if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
+ clearLine(out, termInfo)
+ endl = "\r"
+ fmt.Fprintf(out, endl)
+ } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && termInfo != nil {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`;
+// `isTerminal` describes whether `out` is a terminal. If it is, the function
+// prints `\n` at the end of each line and moves the cursor while displaying.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]int)
+ )
+
+ var termInfo termInfo
+
+ if isTerminal {
+ term := os.Getenv("TERM")
+ if term == "" {
+ term = "vt102"
+ }
+
+ var err error
+ if termInfo, err = gotty.OpenTermInfo(term); err != nil {
+ termInfo = &noTermInfo{}
+ }
+ }
+
+ for {
+ diff := 0
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm.Aux)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(ids) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = len(ids)
+ ids[jm.ID] = line
+ if termInfo != nil {
+ fmt.Fprintf(out, "\n")
+ }
+ }
+ diff = len(ids) - line
+ if termInfo != nil {
+ cursorUp(out, termInfo, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]int)
+ }
+ err := jm.Display(out, termInfo)
+ if jm.ID != "" && termInfo != nil {
+ cursorDown(out, termInfo, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stream interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output stream
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
+ return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
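A minimal sketch of feeding a canned stream through DisplayJSONMessagesStream; with isTerminal set to false, progress records are suppressed (Display returns early for them) and only plain status lines are printed. The JSON payload here is invented for illustration:

```go
package main

import (
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	in := strings.NewReader(
		`{"id":"abc123","status":"Downloading","progressDetail":{"current":10,"total":100}}` + "\n" +
			`{"id":"abc123","status":"Downloading","progressDetail":{"current":100,"total":100}}` + "\n" +
			`{"status":"Download complete"}` + "\n")

	// Not a terminal: progress bars are skipped, status lines still show.
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, os.Stdout.Fd(), false, nil); err != nil {
		panic(err)
	}
	// Output: Download complete
}
```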
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
index c684aa81f..48b86771e 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -3,8 +3,9 @@
package mount
import (
- "golang.org/x/sys/unix"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
// #include <stdlib.h>
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
index ad9ab57f8..069ed8f2d 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -4,16 +4,23 @@ package mount
/*
#include <stdio.h>
+#include <stdlib.h>
#include <sys/mnttab.h>
*/
import "C"
import (
"fmt"
+ "unsafe"
)
func parseMountTable() ([]*Info, error) {
- mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ path := C.CString(C.MNTTAB)
+ defer C.free(unsafe.Pointer(path))
+ mode := C.CString("r")
+ defer C.free(unsafe.Pointer(mode))
+
+ mnttab := C.fopen(path, mode)
if mnttab == nil {
return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 000000000..6a111a3ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,137 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const buffer32K = 32 * 1024
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+ buffer32KPool = newBufferPoolWithSize(buffer32K)
+)
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+ pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+ return &bufferPool{
+ pool: sync.Pool{
+ New: func() interface{} { return make([]byte, size) },
+ },
+ }
+}
+
+func (bp *bufferPool) Get() []byte {
+ return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+ bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := buffer32KPool.Get()
+ written, err = io.CopyBuffer(dst, src, buf)
+ buffer32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.Writecloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
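A short usage sketch: borrow a pooled 32K bufio.Reader, return it when done, and use pools.Copy, which drives io.CopyBuffer with a pooled buffer instead of letting io.Copy allocate one per call:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// Borrow a 32K bufio.Reader from the shared pool and return it after use.
	buf := pools.BufioReader32KPool.Get(strings.NewReader("pooled read\n"))
	line, _ := buf.ReadString('\n')
	fmt.Print(line)
	pools.BufioReader32KPool.Put(buf)

	// pools.Copy reuses a pooled 32K buffer for the copy.
	n, err := pools.Copy(ioutil.Discard, strings.NewReader(strings.Repeat("x", 1<<20)))
	fmt.Println(n, err) // 1048576 <nil>
}
```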
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 000000000..a018a203f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,190 @@
+package stdcopy
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+ // Systemerr represents errors originating from the system that make it
+ // into the multiplexed stream.
+ Systemerr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is a wrapper around io.Writer with extra customized info.
+type stdWriter struct {
+ io.Writer
+ prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows which stream each frame
+// belongs to when demultiplexing. Defining Write also
+// makes stdWriter satisfy io.Writer.
+func (w *stdWriter) Write(p []byte) (n int, err error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ if p == nil {
+ return 0, nil
+ }
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err = w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
+ if n < 0 {
+ n = 0
+ }
+
+ buf.Reset()
+ bufPool.Put(buf)
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
+ }
+}
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, startingBufLen)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ stream := StdType(buf[stdWriterFdIndex])
+ // Check the first byte to know where to write
+ switch stream {
+ case Stdin:
+ fallthrough
+ case Stdout:
+ // Write on stdout
+ out = dstout
+ case Stderr:
+ // Write on stderr
+ out = dsterr
+ case Systemerr:
+ // If we're on Systemerr, we won't write anywhere.
+ // NB: if this code changes later, make sure you don't try to write
+ // to outstream if Systemerr is the stream
+ out = nil
+ default:
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+stdWriterPrefixLen > bufLen {
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+stdWriterPrefixLen {
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ return 0, er
+ }
+ }
+
+ // we might have an error from the source mixed up in our multiplexed
+ // stream. if we do, return it.
+ if stream == Systemerr {
+ return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if ew != nil {
+ return 0, ew
+ }
+
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + stdWriterPrefixLen
+ }
+}
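A round-trip sketch: multiplex stdout and stderr frames into one stream with NewStdWriter, then split them back out with StdCopy:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into a single buffer.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// Demultiplex the frames back into separate writers.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String()) // to stdout
	fmt.Print(stderr.String()) // to stderr
}
```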
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
deleted file mode 100644
index 192e36788..000000000
--- a/vendor/github.com/docker/docker/pkg/system/events_windows.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package system
-
-// This file implements syscalls for Win32 events which are not implemented
-// in golang.
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var (
- procCreateEvent = modkernel32.NewProc("CreateEventW")
- procOpenEvent = modkernel32.NewProc("OpenEventW")
- procSetEvent = modkernel32.NewProc("SetEvent")
- procResetEvent = modkernel32.NewProc("ResetEvent")
- procPulseEvent = modkernel32.NewProc("PulseEvent")
-)
-
-// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
-func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) {
- namep, _ := windows.UTF16PtrFromString(name)
- var _p1 uint32
- if manualReset {
- _p1 = 1
- }
- var _p2 uint32
- if initialState {
- _p2 = 1
- }
- r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
- use(unsafe.Pointer(namep))
- handle = windows.Handle(r0)
- if handle == windows.InvalidHandle {
- err = e1
- }
- return
-}
-
-// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
-func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) {
- namep, _ := windows.UTF16PtrFromString(name)
- var _p1 uint32
- if inheritHandle {
- _p1 = 1
- }
- r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
- use(unsafe.Pointer(namep))
- handle = windows.Handle(r0)
- if handle == windows.InvalidHandle {
- err = e1
- }
- return
-}
-
-// SetEvent implements win32 SetEvent func in golang.
-func SetEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procSetEvent)
-}
-
-// ResetEvent implements win32 ResetEvent func in golang.
-func ResetEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procResetEvent)
-}
-
-// PulseEvent implements win32 PulseEvent func in golang.
-func PulseEvent(handle windows.Handle) (err error) {
- return setResetPulse(handle, procPulseEvent)
-}
-
-func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) {
- r0, _, _ := proc.Call(uintptr(handle))
- if r0 != 0 {
- err = syscall.Errno(r0)
- }
- return
-}
-
-var temp unsafe.Pointer
-
-// use ensures a variable is kept alive without the GC freeing while still needed
-func use(p unsafe.Pointer) {
- temp = p
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go
new file mode 100644
index 000000000..a219895e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package system
+
+// InitLCOW does nothing since LCOW is a Windows-only feature
+func InitLCOW(experimental bool) {
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
index 019c66441..e75183726 100644
--- a/vendor/github.com/docker/docker/pkg/system/init_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -8,9 +8,10 @@ import "os"
// on build number. @jhowardmsft
var lcowSupported = false
-func init() {
+// InitLCOW sets whether LCOW is supported or not
+func InitLCOW(experimental bool) {
// LCOW initialization
- if os.Getenv("LCOW_SUPPORTED") != "" {
+ if experimental && os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true
}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
index f634a6be6..4160616f4 100644
--- a/vendor/github.com/docker/docker/pkg/system/path.go
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -1,6 +1,13 @@
package system
-import "runtime"
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/containerd/continuity/pathdriver"
+)
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string {
return defaultUnixPathEnv
}
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive.
+// On Linux: this is a no-op.
+// On Windows: this does the following:
+// It verifies and manipulates a Windows path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates the path to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+ if runtime.GOOS != "windows" || LCOWSupported() {
+ return path, nil
+ }
+
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !driver.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
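The signature change threads a pathdriver.PathDriver through so LCOW can supply Linux path semantics on a Windows host. A hypothetical caller using the default local driver (assuming the vendored pathdriver package exports LocalPathDriver):

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
	"github.com/docker/docker/pkg/system"
)

func main() {
	// On Linux this is a pass-through; on Windows "C:\data" would come back
	// as `\data` and "d:\data" would be rejected as a non-system drive.
	p, err := system.CheckSystemDriveAndRemoveDriveLetter("/var/lib", pathdriver.LocalPathDriver)
	fmt.Println(p, err) // /var/lib <nil>
}
```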
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
deleted file mode 100644
index f3762e69d..000000000
--- a/vendor/github.com/docker/docker/pkg/system/path_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !windows
-
-package system
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
-// is the system drive. This is a no-op on Linux.
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- return path, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
deleted file mode 100644
index aab891522..000000000
--- a/vendor/github.com/docker/docker/pkg/system/path_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build windows
-
-package system
-
-import (
- "fmt"
- "path/filepath"
- "strings"
-)
-
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C: --> Fail
-// C:\ --> \
-// a --> a
-// /a --> \a
-// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
- if len(path) == 2 && string(path[1]) == ":" {
- return "", fmt.Errorf("No relative path specified in %q", path)
- }
- if !filepath.IsAbs(path) || len(path) < 2 {
- return filepath.FromSlash(path), nil
- }
- if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
- return "", fmt.Errorf("The specified path is not on the system drive (C:)")
- }
- return filepath.FromSlash(path[2:]), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 66bf6e28e..1939f9518 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -5,10 +5,10 @@ import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
- mode: uint32(s.Mode),
+ mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
- rdev: uint64(s.Rdev),
+ rdev: s.Rdev,
mtim: s.Mtim}, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
index f5262bccf..55873c055 100644
--- a/vendor/github.com/docker/docker/pkg/term/ascii.go
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -59,7 +59,7 @@ next:
return nil, fmt.Errorf("Unknown character: '%s'", key)
}
} else {
- codes = append(codes, byte(key[0]))
+ codes = append(codes, key[0])
}
}
return codes, nil
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
index c0332c3cd..b6819b342 100644
--- a/vendor/github.com/docker/docker/pkg/term/term_windows.go
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -23,14 +23,7 @@ type Winsize struct {
Width uint16
}
-const (
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
- enableVirtualTerminalInput = 0x0200
- enableVirtualTerminalProcessing = 0x0004
- disableNewlineAutoReturn = 0x0008
-)
-
-// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
@@ -40,8 +33,8 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
var emulateStdin, emulateStdout, emulateStderr bool
fd := os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate that enableVirtualTerminalInput is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+ // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
emulateStdin = true
} else {
vtInputSupported = true
@@ -53,21 +46,21 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
fd = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate disableNewlineAutoReturn is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStdout = true
} else {
- winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
fd = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate disableNewlineAutoReturn is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStderr = true
} else {
- winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
@@ -183,9 +176,9 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) {
return nil, err
}
- // Ignore failures, since disableNewlineAutoReturn might not be supported on this
+ // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
// version of Windows.
- winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+ winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
return state, err
}
@@ -215,7 +208,7 @@ func MakeRaw(fd uintptr) (*State, error) {
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
- mode |= enableVirtualTerminalInput
+ mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
}
err = winterm.SetConsoleMode(fd, mode)
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
index 3e25eb7a4..0f21abcc2 100644
--- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -29,6 +29,8 @@ func MakeRaw(fd uintptr) (*State, error) {
termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
termios.Cflag &^= (unix.CSIZE | unix.PARENB)
termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
return nil, err
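The added VMIN=1/VTIME=0 settings match cfmakeraw(3): a raw-mode read blocks until at least one byte is available instead of returning immediately. A small sketch of entering and leaving raw mode, assuming the package's usual RestoreTerminal counterpart to MakeRaw:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	state, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal:", err)
		return
	}
	// With VMIN=1 and VTIME=0 this blocks until one byte arrives.
	buf := make([]byte, 1)
	os.Stdin.Read(buf)
	term.RestoreTerminal(fd, state) // put the terminal back how we found it
	fmt.Printf("read %q\n", buf[0])
}
```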
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
index f58367fe6..85c4d9d67 100644
--- a/vendor/github.com/docker/docker/pkg/term/winsize.go
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -3,28 +3,18 @@
package term
import (
- "unsafe"
-
"golang.org/x/sys/unix"
)
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
- ws := &Winsize{}
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
- // Skipp errno = 0
- if err == 0 {
- return ws, nil
- }
+ uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
- // Skipp errno = 0
- if err == 0 {
- return nil
- }
- return err
+ uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
+ return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
}
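The rewrite replaces the hand-rolled unix.Syscall invocations (and their bogus "errno = 0" special-casing) with the typed unix.IoctlGetWinsize/IoctlSetWinsize helpers. A quick probe of the current terminal size:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	ws, err := term.GetWinsize(os.Stdout.Fd())
	if err != nil {
		fmt.Fprintln(os.Stderr, "stdout is not a terminal:", err)
		return
	}
	fmt.Printf("terminal is %d columns x %d rows\n", ws.Width, ws.Height)
}
```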
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
deleted file mode 100644
index e4dec3a5d..000000000
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build go1.8
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
- return c.Clone()
-}
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
index 1e6ea90e3..1b5179c70 100644
--- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
@@ -50,7 +50,6 @@ func DefaultProfile() *types.Seccomp {
"access",
"adjtimex",
"alarm",
- "alarm",
"bind",
"brk",
"capget",
@@ -488,6 +487,7 @@ func DefaultProfile() *types.Seccomp {
"mount",
"name_to_handle_at",
"perf_event_open",
+ "quotactl",
"setdomainname",
"sethostname",
"setns",
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
index 7608b0e33..bd3c283e2 100644
--- a/vendor/github.com/docker/docker/vendor.conf
+++ b/vendor/github.com/docker/docker/vendor.conf
@@ -1,33 +1,37 @@
# the following lines are in sorted order, FYI
-github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
-github.com/Microsoft/hcsshim v0.6.2
-github.com/Microsoft/go-winio v0.4.4
-github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/Microsoft/hcsshim v0.6.5
+github.com/Microsoft/go-winio v0.4.5
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
github.com/gorilla/context v1.1
github.com/gorilla/mux v1.1
-github.com/jhowardmsft/opengcs v0.0.12
+github.com/Microsoft/opengcs v0.3.4
github.com/kr/pty 5cf931ef8f
github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.1
+github.com/sirupsen/logrus v1.0.3
github.com/tchap/go-patricia v2.2.6
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/pmezard/go-difflib v1.0.0
+github.com/gotestyourself/gotestyourself v1.1.0
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
+github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
+
#get libnetwork packages
-github.com/docker/libnetwork 248fd5ea6a67f8810da322e6e7441e8de96a9045 https://github.com/dmcgowan/libnetwork.git
+github.com/docker/libnetwork 0f08d31bf0e640e0cdc6d5161227f87602d605c5
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -62,9 +66,9 @@ github.com/pborman/uuid v1.0
google.golang.org/grpc v1.3.0
# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
-github.com/opencontainers/runc e9325d442f5979c4f79bfa9e09bdf7abb74ba03b https://github.com/dmcgowan/runc.git
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
-github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs
+github.com/opencontainers/runtime-spec v1.0.0
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
@@ -101,17 +105,15 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd fc10004571bb9b26695ccbf2dd4a83213f60b93e https://github.com/dmcgowan/containerd.git
+github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
-github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
# cluster
-github.com/docker/swarmkit 8bdecc57887ffc598b63d6433f58e0d2852112c3 https://github.com/dmcgowan/swarmkit.git
+github.com/docker/swarmkit 941a01844b89c56aa61086fecb167ab3af1de22b
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
-golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
new file mode 100644
index 000000000..706634474
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
@@ -0,0 +1,6 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+You can find the Docker license at the following link:
+https://raw.githubusercontent.com/docker/docker/master/LICENSE
diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE
new file mode 100644
index 000000000..f3ce3a9aa
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013-2018, go-dockerclient authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown
new file mode 100644
index 000000000..86824d6c5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/README.markdown
@@ -0,0 +1,133 @@
+# go-dockerclient
+
+[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient)
+[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient)
+[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
+
+This package presents a client for the Docker remote API. It also provides
+support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
+
+This package also provides support for docker's network API, which is a simple
+passthrough to the libnetwork remote API. Note that docker's network API is
+only available in docker 1.8 and above, and only enabled in docker if
+DOCKER_EXPERIMENTAL is defined during the docker build process.
+
+For more details, check the [remote API
+documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "unix:///var/run/docker.sock"
+ client, err := docker.NewClient(endpoint)
+ if err != nil {
+ panic(err)
+ }
+ imgs, err := client.ListImages(docker.ListImagesOptions{All: false})
+ if err != nil {
+ panic(err)
+ }
+ for _, img := range imgs {
+ fmt.Println("ID: ", img.ID)
+ fmt.Println("RepoTags: ", img.RepoTags)
+ fmt.Println("Created: ", img.Created)
+ fmt.Println("Size: ", img.Size)
+ fmt.Println("VirtualSize: ", img.VirtualSize)
+ fmt.Println("ParentId: ", img.ParentID)
+ }
+}
+```
+
+## Using with TLS
+
+In order to instantiate the client for a TLS-enabled daemon, you should use
+NewTLSClient, passing the endpoint and path for key and certificates as
+parameters.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "tcp://[ip]:[port]"
+ path := os.Getenv("DOCKER_CERT_PATH")
+ ca := fmt.Sprintf("%s/ca.pem", path)
+ cert := fmt.Sprintf("%s/cert.pem", path)
+ key := fmt.Sprintf("%s/key.pem", path)
+ client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
+ // use client
+}
+```
+
+If using [docker-machine](https://docs.docker.com/machine/), or another
+application that exports environment variables `DOCKER_HOST`,
+`DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH`, you can use NewClientFromEnv.
+
+
+```go
+package main
+
+import (
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ client, _ := docker.NewClientFromEnv()
+ // use client
+}
+```
+
+See the documentation for more details.
+
+## Developing
+
+All development commands can be seen in the [Makefile](Makefile).
+
+Committed code must pass:
+
+* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile).
+* [go vet](https://golang.org/cmd/vet/)
+* [gofmt](https://golang.org/cmd/gofmt)
+* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
+
+Running `make test` will check all of these. If your editor does not
+automatically call ``gofmt -s``, `make fmt` will format all go files in this
+repository.
+
+## Vendoring
+
+go-dockerclient uses [dep](https://github.com/golang/dep/) for vendoring. If
+you're using dep, you should be able to pick go-dockerclient releases and get
+the proper dependencies.
+
+With other vendoring tools, users might need to specify go-dockerclient's
+dependencies manually.
+
+## Using with Docker 1.9 and Go 1.4
+
+There's a tag for using go-dockerclient with Docker 1.9 (which requires
+compiling go-dockerclient with Go 1.4); the tag name is ``docker-1.9/go-1.4``.
+
+The instructions below can be used to get a version of go-dockerclient that compiles with Go 1.4:
+
+```
+% git clone -b docker-1.9/go-1.4 https://github.com/fsouza/go-dockerclient.git $GOPATH/src/github.com/fsouza/go-dockerclient
+% git clone -b v1.9.1 https://github.com/docker/docker.git $GOPATH/src/github.com/docker/docker
+% go get github.com/fsouza/go-dockerclient
+```
diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go
new file mode 100644
index 000000000..c58de8671
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/auth.go
@@ -0,0 +1,185 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+)
+
+// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
+var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
+
+// AuthConfiguration represents authentication options to use in the PushImage
+// method. It represents the authentication in the Docker index server.
+type AuthConfiguration struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+ ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// AuthConfigurations represents authentication options to use for the
+// PushImage method accommodating the new X-Registry-Config header
+type AuthConfigurations struct {
+ Configs map[string]AuthConfiguration `json:"configs"`
+}
+
+// AuthConfigurations119 is used to serialize a set of AuthConfigurations
+// for Docker API >= 1.19.
+type AuthConfigurations119 map[string]AuthConfiguration
+
+// dockerConfig represents a registry authentication configuration from the
+// .dockercfg file.
+type dockerConfig struct {
+ Auth string `json:"auth"`
+ Email string `json:"email"`
+}
+
+// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON
+// in the same format as the .dockercfg file.
+func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ return NewAuthConfigurations(r)
+}
+
+func cfgPaths(dockerConfigEnv string, homeEnv string) []string {
+ var paths []string
+ if dockerConfigEnv != "" {
+ paths = append(paths, path.Join(dockerConfigEnv, "config.json"))
+ }
+ if homeEnv != "" {
+ paths = append(paths, path.Join(homeEnv, ".docker", "config.json"))
+ paths = append(paths, path.Join(homeEnv, ".dockercfg"))
+ }
+ return paths
+}
+
+// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from
+// system config files. The following files are checked in the order listed:
+// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment,
+// - $HOME/.docker/config.json
+// - $HOME/.dockercfg
+func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
+ err := fmt.Errorf("No docker configuration found")
+ var auths *AuthConfigurations
+
+ pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME"))
+ for _, path := range pathsToTry {
+ auths, err = NewAuthConfigurationsFromFile(path)
+ if err == nil {
+ return auths, nil
+ }
+ }
+ return auths, err
+}
+
+// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
+// same format as the .dockercfg file.
+func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
+ var auth *AuthConfigurations
+ confs, err := parseDockerConfig(r)
+ if err != nil {
+ return nil, err
+ }
+ auth, err = authConfigs(confs)
+ if err != nil {
+ return nil, err
+ }
+ return auth, nil
+}
+
+func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r)
+ byteData := buf.Bytes()
+
+ confsWrapper := struct {
+ Auths map[string]dockerConfig `json:"auths"`
+ }{}
+ if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
+ if len(confsWrapper.Auths) > 0 {
+ return confsWrapper.Auths, nil
+ }
+ }
+
+ var confs map[string]dockerConfig
+ if err := json.Unmarshal(byteData, &confs); err != nil {
+ return nil, err
+ }
+ return confs, nil
+}
+
+// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
+func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
+ c := &AuthConfigurations{
+ Configs: make(map[string]AuthConfiguration),
+ }
+ for reg, conf := range confs {
+ if conf.Auth == "" {
+ continue
+ }
+ data, err := base64.StdEncoding.DecodeString(conf.Auth)
+ if err != nil {
+ return nil, err
+ }
+ userpass := strings.SplitN(string(data), ":", 2)
+ if len(userpass) != 2 {
+ return nil, ErrCannotParseDockercfg
+ }
+ c.Configs[reg] = AuthConfiguration{
+ Email: conf.Email,
+ Username: userpass[0],
+ Password: userpass[1],
+ ServerAddress: reg,
+ }
+ }
+ return c, nil
+}
+
+// AuthStatus is the authentication status returned by Docker API versions >= 1.23.
+type AuthStatus struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+ IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty" toml:"IdentityToken,omitempty"`
+}
+
+// AuthCheck validates the given credentials. It returns nil if successful.
+//
+// For Docker API versions >= 1.23, the AuthStatus struct will be populated; otherwise it will be empty.
+//
+// See https://goo.gl/6nsZkH for more details.
+func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) {
+ var authStatus AuthStatus
+ if conf == nil {
+ return authStatus, errors.New("conf is nil")
+ }
+ resp, err := c.do("POST", "/auth", doOptions{data: conf})
+ if err != nil {
+ return authStatus, err
+ }
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return authStatus, err
+ }
+ if len(data) == 0 {
+ return authStatus, nil
+ }
+ if err := json.Unmarshal(data, &authStatus); err != nil {
+ return authStatus, err
+ }
+ return authStatus, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go
new file mode 100644
index 000000000..3f936b223
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/change.go
@@ -0,0 +1,43 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "fmt"
+
+// ChangeType is a type for constants indicating the type of change
+// in a container
+type ChangeType int
+
+const (
+ // ChangeModify is the ChangeType for container modifications
+ ChangeModify ChangeType = iota
+
+ // ChangeAdd is the ChangeType for additions to a container
+ ChangeAdd
+
+ // ChangeDelete is the ChangeType for deletions from a container
+ ChangeDelete
+)
+
+// Change represents a change in a container.
+//
+// See https://goo.gl/Wo0JJp for more details.
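+//
+// For example, the change below renders as "A /tmp/newfile" when printed:
+//
+//	c := docker.Change{Path: "/tmp/newfile", Kind: docker.ChangeAdd}
+//	fmt.Println(c.String())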
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
new file mode 100644
index 000000000..6b754f271
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -0,0 +1,1092 @@
+// Copyright 2013 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package docker provides a client for the Docker remote API.
+//
+// See https://goo.gl/o2v3rk for more details on the remote API.
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/docker/docker/opts"
+ "github.com/docker/docker/pkg/homedir"
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+const (
+ userAgent = "go-dockerclient"
+
+ unixProtocol = "unix"
+ namedPipeProtocol = "npipe"
+)
+
+var (
+ // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
+ ErrInvalidEndpoint = errors.New("invalid endpoint")
+
+ // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
+
+ // ErrInactivityTimeout is returned when a streamable call has been inactive for some time.
+ ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
+
+ apiVersion112, _ = NewAPIVersion("1.12")
+ apiVersion119, _ = NewAPIVersion("1.19")
+ apiVersion124, _ = NewAPIVersion("1.24")
+ apiVersion125, _ = NewAPIVersion("1.25")
+)
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>.<patch>, where <major>,
+// <minor> and <patch> are integer numbers.
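+//
+// A short sketch of parsing and comparing versions (errors elided):
+//
+//	v124, _ := docker.NewAPIVersion("1.24")
+//	v125, _ := docker.NewAPIVersion("1.25")
+//	fmt.Println(v124.LessThan(v125)) // true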
+func NewAPIVersion(input string) (APIVersion, error) {
+ if !strings.Contains(input, ".") {
+ return nil, fmt.Errorf("Unable to parse version %q", input)
+ }
+ raw := strings.Split(input, "-")
+ arr := strings.Split(raw[0], ".")
+ ret := make(APIVersion, len(arr))
+ var err error
+ for i, val := range arr {
+ ret[i], err = strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+ }
+ }
+ return ret, nil
+}
+
+func (version APIVersion) String() string {
+ var str string
+ for i, val := range version {
+ str += strconv.Itoa(val)
+ if i < len(version)-1 {
+ str += "."
+ }
+ }
+ return str
+}
+
+// LessThan is a function for comparing APIVersion values
+func (version APIVersion) LessThan(other APIVersion) bool {
+ return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion values
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion values
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+ return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion values
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+ for i, v := range version {
+ if i <= len(other)-1 {
+ otherVersion := other[i]
+
+ if v < otherVersion {
+ return -1
+ } else if v > otherVersion {
+ return 1
+ }
+ }
+ }
+ if len(version) > len(other) {
+ return 1
+ }
+ if len(version) < len(other) {
+ return -1
+ }
+ return 0
+}
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+ SkipServerVersionCheck bool
+ HTTPClient *http.Client
+ TLSConfig *tls.Config
+ Dialer Dialer
+
+ endpoint string
+ endpointURL *url.URL
+ eventMonitor *eventMonitoringState
+ requestedAPIVersion APIVersion
+ serverAPIVersion APIVersion
+ expectedAPIVersion APIVersion
+}
+
+// Dialer is an interface that allows network connections to be dialed
+// (net.Dialer fulfills this interface), as well as named pipes (via a shim
+// using winio.DialPipe).
+type Dialer interface {
+ Dial(network, address string) (net.Conn, error)
+}
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
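+//
+// A minimal sketch, assuming a daemon listening on the default Unix socket:
+//
+//	client, err := docker.NewClient("unix:///var/run/docker.sock")
+//	if err != nil {
+//		log.Fatal(err)
+//	}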
+func NewClient(endpoint string) (*Client, error) {
+ client, err := NewVersionedClient(endpoint, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+ client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+ client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, false)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ c := &Client{
+ HTTPClient: defaultClient(),
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }
+ c.initializeNativeClient(defaultTransport)
+ return c, nil
+}
+
+// WithTransport replaces the underlying HTTP client of the Docker client by
+// accepting a function that returns a pointer to a transport object.
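+//
+// A sketch swapping in a custom transport (the values are illustrative):
+//
+//	client.WithTransport(func() *http.Transport {
+//		return &http.Transport{MaxIdleConnsPerHost: 4}
+//	})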
+func (c *Client) WithTransport(trFunc func() *http.Transport) {
+ c.initializeNativeClient(trFunc)
+}
+
+// NewVersionnedTLSClient is like NewVersionedTLSClient, but with an extra n in the name.
+//
+// Deprecated: Use NewVersionedTLSClient instead.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ var certPEMBlock []byte
+ var keyPEMBlock []byte
+ var caPEMCert []byte
+ if _, err := os.Stat(cert); !os.IsNotExist(err) {
+ certPEMBlock, err = ioutil.ReadFile(cert)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if _, err := os.Stat(key); !os.IsNotExist(err) {
+ keyPEMBlock, err = ioutil.ReadFile(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if _, err := os.Stat(ca); !os.IsNotExist(err) {
+ caPEMCert, err = ioutil.ReadFile(ca)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
+
+// NewClientFromEnv returns a Client instance ready for communication created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
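+//
+// A minimal sketch, assuming DOCKER_HOST (and, if needed, DOCKER_TLS_VERIFY
+// and DOCKER_CERT_PATH) are set in the environment:
+//
+//	client, err := docker.NewClientFromEnv()
+//	if err != nil {
+//		log.Fatal(err)
+//	}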
+func NewClientFromEnv() (*Client, error) {
+ client, err := NewVersionedClientFromEnv("")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
+// and using a specific remote API version.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
+ dockerEnv, err := getDockerEnv()
+ if err != nil {
+ return nil, err
+ }
+ dockerHost := dockerEnv.dockerHost
+ if dockerEnv.dockerTLSVerify {
+ parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
+ }
+ cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
+ key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
+ ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
+ return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
+ }
+ return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, true)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ tlsConfig := &tls.Config{}
+ if certPEMBlock != nil && keyPEMBlock != nil {
+ tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ }
+ if caPEMCert == nil {
+ tlsConfig.InsecureSkipVerify = true
+ } else {
+ caPool := x509.NewCertPool()
+ if !caPool.AppendCertsFromPEM(caPEMCert) {
+ return nil, errors.New("Could not add RootCA pem")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ tr := defaultTransport()
+ tr.TLSClientConfig = tlsConfig
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ HTTPClient: &http.Client{Transport: tr},
+ TLSConfig: tlsConfig,
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }
+ c.initializeNativeClient(defaultTransport)
+ return c, nil
+}
+
+// SetTimeout takes a timeout and applies it to the HTTPClient. It should not
+// be called concurrently with any other Client methods.
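+//
+// For example:
+//
+//	client.SetTimeout(30 * time.Second)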
+func (c *Client) SetTimeout(t time.Duration) {
+ if c.HTTPClient != nil {
+ c.HTTPClient.Timeout = t
+ }
+}
+
+func (c *Client) checkAPIVersion() error {
+ serverAPIVersionString, err := c.getServerAPIVersionString()
+ if err != nil {
+ return err
+ }
+ c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+ if err != nil {
+ return err
+ }
+ if c.requestedAPIVersion == nil {
+ c.expectedAPIVersion = c.serverAPIVersion
+ } else {
+ c.expectedAPIVersion = c.requestedAPIVersion
+ }
+ return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+ return c.endpoint
+}
+
+// Ping pings the docker server
+//
+// See https://goo.gl/wYfgY1 for more details.
+func (c *Client) Ping() error {
+ return c.PingWithContext(nil)
+}
+
+// PingWithContext pings the docker server.
+// The context object can be used to cancel the ping request.
+//
+// See https://goo.gl/wYfgY1 for more details.
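+//
+// A sketch of a ping bounded by a deadline (assumes a reachable daemon):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := client.PingWithContext(ctx); err != nil {
+//		log.Fatal(err)
+//	}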
+func (c *Client) PingWithContext(ctx context.Context) error {
+ path := "/_ping"
+ resp, err := c.do("GET", path, doOptions{context: ctx})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return newError(resp)
+ }
+ resp.Body.Close()
+ return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+ }
+ var versionResponse map[string]interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+ return "", err
+ }
+ if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+ return version, nil
+ }
+ return "", nil
+}
+
+type doOptions struct {
+ data interface{}
+ forceJSON bool
+ headers map[string]string
+ context context.Context
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+ var params io.Reader
+ if doOptions.data != nil || doOptions.forceJSON {
+ buf, err := json.Marshal(doOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ protocol := c.endpointURL.Scheme
+ var u string
+ switch protocol {
+ case unixProtocol, namedPipeProtocol:
+ u = c.getFakeNativeURL(path)
+ default:
+ u = c.getURL(path)
+ }
+
+ req, err := http.NewRequest(method, u, params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if doOptions.data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ for k, v := range doOptions.headers {
+ req.Header.Set(k, v)
+ }
+
+ ctx := doOptions.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, ErrConnectionRefused
+ }
+
+ return nil, chooseError(ctx, err)
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, newError(resp)
+ }
+ return resp, nil
+}
+
+type streamOptions struct {
+ setRawTerminal bool
+ rawJSONStream bool
+ useJSONDecoder bool
+ headers map[string]string
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ reqSent chan struct{}
+ // timeout is the initial connection timeout
+ timeout time.Duration
+	// inactivityTimeout is the timeout applied when no data is received;
+	// it is reset every time new data arrives
+ inactivityTimeout time.Duration
+ context context.Context
+}
+
+// if error in context, return that instead of generic http error
+func chooseError(ctx context.Context, err error) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return err
+ }
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+ if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ streamOptions.in = bytes.NewReader(nil)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range streamOptions.headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if streamOptions.stdout == nil {
+ streamOptions.stdout = ioutil.Discard
+ }
+ if streamOptions.stderr == nil {
+ streamOptions.stderr = ioutil.Discard
+ }
+
+ // make a sub-context so that our active cancellation does not affect parent
+ ctx := streamOptions.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ subCtx, cancelRequest := context.WithCancel(ctx)
+ defer cancelRequest()
+
+ if protocol == unixProtocol || protocol == namedPipeProtocol {
+ var dial net.Conn
+ dial, err = c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ go func() {
+ <-subCtx.Done()
+ dial.Close()
+ }()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return chooseError(subCtx, err)
+ }
+
+		// ReadResponse may hang if the server does not reply
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+ }
+
+ if streamOptions.reqSent != nil {
+ close(streamOptions.reqSent)
+ }
+ if resp, err = http.ReadResponse(breader, req); err != nil {
+ // Cancel timeout for future I/O operations
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Time{})
+ }
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+
+ return chooseError(subCtx, err)
+ }
+ } else {
+ if resp, err = c.HTTPClient.Do(req.WithContext(subCtx)); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return chooseError(subCtx, err)
+ }
+ if streamOptions.reqSent != nil {
+ close(streamOptions.reqSent)
+ }
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return newError(resp)
+ }
+ var canceled uint32
+ if streamOptions.inactivityTimeout > 0 {
+ var ch chan<- struct{}
+ resp.Body, ch = handleInactivityTimeout(resp.Body, streamOptions.inactivityTimeout, cancelRequest, &canceled)
+ defer close(ch)
+ }
+ err = handleStreamResponse(resp, &streamOptions)
+ if err != nil {
+ if atomic.LoadUint32(&canceled) != 0 {
+ return ErrInactivityTimeout
+ }
+ return chooseError(subCtx, err)
+ }
+ return nil
+}
+
+func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error {
+ var err error
+ if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" {
+ if streamOptions.setRawTerminal {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
+ }
+ return err
+ }
+ // if we want to get raw json stream, just copy it back to output
+ // without decoding it
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ if st, ok := streamOptions.stdout.(interface {
+ io.Writer
+ FD() uintptr
+ IsTerminal() bool
+ }); ok {
+ err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil)
+ } else {
+ err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil)
+ }
+ return err
+}
+
+type proxyReader struct {
+ io.ReadCloser
+ calls uint64
+}
+
+func (p *proxyReader) callCount() uint64 {
+ return atomic.LoadUint64(&p.calls)
+}
+
+func (p *proxyReader) Read(data []byte) (int, error) {
+ atomic.AddUint64(&p.calls, 1)
+ return p.ReadCloser.Read(data)
+}
+
+func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) {
+ done := make(chan struct{})
+ proxyReader := &proxyReader{ReadCloser: reader}
+ go func() {
+ var lastCallCount uint64
+ for {
+ select {
+ case <-time.After(timeout):
+ case <-done:
+ return
+ }
+ curCallCount := proxyReader.callCount()
+ if curCallCount == lastCallCount {
+ atomic.AddUint32(canceled, 1)
+ cancelRequest()
+ return
+ }
+ lastCallCount = curCallCount
+ }
+ }()
+ return proxyReader, done
+}
+
+type hijackOptions struct {
+ success chan struct{}
+ setRawTerminal bool
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ data interface{}
+}
+
+// CloseWaiter is an interface with methods for closing the underlying resource
+// and then waiting for it to finish processing.
+type CloseWaiter interface {
+ io.Closer
+ Wait() error
+}
+
+type waiterFunc func() error
+
+func (w waiterFunc) Wait() error { return w() }
+
+type closerFunc func() error
+
+func (c closerFunc) Close() error { return c() }
+
+func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ var params io.Reader
+ if hijackOptions.data != nil {
+ buf, err := json.Marshal(hijackOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "tcp")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != unixProtocol && protocol != namedPipeProtocol {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol {
+ netDialer, ok := c.Dialer.(*net.Dialer)
+ if !ok {
+ return nil, ErrTLSNotSupported
+ }
+ dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ dial, err = c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ errs := make(chan error, 1)
+ quit := make(chan struct{})
+ go func() {
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+ clientconn.Do(req)
+ if hijackOptions.success != nil {
+ hijackOptions.success <- struct{}{}
+ <-hijackOptions.success
+ }
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+
+ errChanOut := make(chan error, 1)
+ errChanIn := make(chan error, 2)
+ if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
+ close(errChanOut)
+ } else {
+ // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
+ // Otherwise, if the only stream you care about is stdin, your attach session
+ // will "hang" until the container terminates, even though you're not reading
+ // stdout/stderr
+ if hijackOptions.stdout == nil {
+ hijackOptions.stdout = ioutil.Discard
+ }
+ if hijackOptions.stderr == nil {
+ hijackOptions.stderr = ioutil.Discard
+ }
+
+ go func() {
+ defer func() {
+ if hijackOptions.in != nil {
+ if closer, ok := hijackOptions.in.(io.Closer); ok {
+ closer.Close()
+ }
+ errChanIn <- nil
+ }
+ }()
+
+ var err error
+ if hijackOptions.setRawTerminal {
+ _, err = io.Copy(hijackOptions.stdout, br)
+ } else {
+ _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
+ }
+ errChanOut <- err
+ }()
+ }
+
+ go func() {
+ var err error
+ if hijackOptions.in != nil {
+ _, err = io.Copy(rwc, hijackOptions.in)
+ }
+ errChanIn <- err
+ rwc.(interface {
+ CloseWrite() error
+ }).CloseWrite()
+ }()
+
+ var errIn error
+ select {
+ case errIn = <-errChanIn:
+ case <-quit:
+ }
+
+ var errOut error
+ select {
+ case errOut = <-errChanOut:
+ case <-quit:
+ }
+
+ if errIn != nil {
+ errs <- errIn
+ } else {
+ errs <- errOut
+ }
+ }()
+
+ return struct {
+ closerFunc
+ waiterFunc
+ }{
+ closerFunc(func() error { close(quit); return nil }),
+ waiterFunc(func() error { return <-errs }),
+ }, nil
+}
+
+func (c *Client) getURL(path string) string {
+ urlStr := strings.TrimRight(c.endpointURL.String(), "/")
+ if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol {
+ urlStr = ""
+ }
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX
+// domain socket to the given path.
+func (c *Client) getFakeNativeURL(path string) string {
+ u := *c.endpointURL // Copy.
+
+ // Override URL so that net/http will not complain.
+ u.Scheme = "http"
+ u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
+ u.Path = ""
+ urlStr := strings.TrimRight(u.String(), "/")
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ addQueryStringValue(items, key, value.Field(i))
+ }
+ return items.Encode()
+}
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.Uint() > 0 {
+ items.Add(key, strconv.FormatUint(v.Uint(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Map:
+ if len(v.MapKeys()) > 0 {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Array, reflect.Slice:
+ vLen := v.Len()
+ if vLen > 0 {
+ for i := 0; i < vLen; i++ {
+ addQueryStringValue(items, key, v.Index(i))
+ }
+ }
+ }
+}
+
+// Error represents a failure from the Docker API, carrying the HTTP status
+// code and the error message returned in the response body.
+type Error struct {
+ Status int
+ Message string
+}
+
+func newError(resp *http.Response) *Error {
+ type ErrMsg struct {
+ Message string `json:"message"`
+ }
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
+ }
+ var emsg ErrMsg
+ err = json.Unmarshal(data, &emsg)
+ if err != nil {
+ return &Error{Status: resp.StatusCode, Message: string(data)}
+ }
+ return &Error{Status: resp.StatusCode, Message: emsg.Message}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
+ if endpoint != "" && !strings.Contains(endpoint, "://") {
+ endpoint = "tcp://" + endpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if tls && u.Scheme != "unix" {
+ u.Scheme = "https"
+ }
+ switch u.Scheme {
+ case unixProtocol, namedPipeProtocol:
+ return u, nil
+ case "http", "https", "tcp":
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ if u.Scheme == "tcp" {
+ if tls {
+ u.Scheme = "https"
+ } else {
+ u.Scheme = "http"
+ }
+ }
+ return u, nil
+ }
+ return nil, ErrInvalidEndpoint
+ default:
+ return nil, ErrInvalidEndpoint
+ }
+}
+
+type dockerEnv struct {
+ dockerHost string
+ dockerTLSVerify bool
+ dockerCertPath string
+}
+
+func getDockerEnv() (*dockerEnv, error) {
+ dockerHost := os.Getenv("DOCKER_HOST")
+ var err error
+ if dockerHost == "" {
+ dockerHost = opts.DefaultHost
+ }
+ dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
+ var dockerCertPath string
+ if dockerTLSVerify {
+ dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
+ if dockerCertPath == "" {
+ home := homedir.Get()
+ if home == "" {
+ return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
+ }
+ dockerCertPath = filepath.Join(home, ".docker")
+ dockerCertPath, err = filepath.Abs(dockerCertPath)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &dockerEnv{
+ dockerHost: dockerHost,
+ dockerTLSVerify: dockerTLSVerify,
+ dockerCertPath: dockerCertPath,
+ }, nil
+}
+
+// defaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
+func defaultTransport() *http.Transport {
+ transport := defaultPooledTransport()
+ transport.DisableKeepAlives = true
+ transport.MaxIdleConnsPerHost = -1
+ return transport
+}
+
+// defaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func defaultPooledTransport() *http.Transport {
+ transport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+ return transport
+}
+
+// defaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func defaultClient() *http.Client {
+ return &http.Client{
+ Transport: defaultTransport(),
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
new file mode 100644
index 000000000..57d7904ea
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package docker
+
+import (
+ "context"
+ "net"
+ "net/http"
+)
+
+// initializeNativeClient initializes the native Unix domain socket client on
+// Unix-style operating systems
+func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
+ if c.endpointURL.Scheme != unixProtocol {
+ return
+ }
+ sockPath := c.endpointURL.Path
+
+ tr := trFunc()
+
+ tr.Dial = func(network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial(unixProtocol, sockPath)
+ }
+ tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial(unixProtocol, sockPath)
+ }
+ c.HTTPClient.Transport = tr
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
new file mode 100644
index 000000000..8e7b457d7
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
@@ -0,0 +1,45 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package docker
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+const namedPipeConnectTimeout = 2 * time.Second
+
+type pipeDialer struct {
+ dialFunc func(network, addr string) (net.Conn, error)
+}
+
+func (p pipeDialer) Dial(network, address string) (net.Conn, error) {
+ return p.dialFunc(network, address)
+}
+
+// initializeNativeClient initializes the native Named Pipe client for Windows
+func (c *Client) initializeNativeClient(trFunc func() *http.Transport) {
+ if c.endpointURL.Scheme != namedPipeProtocol {
+ return
+ }
+ namedPipePath := c.endpointURL.Path
+ dialFunc := func(network, addr string) (net.Conn, error) {
+ timeout := namedPipeConnectTimeout
+ return winio.DialPipe(namedPipePath, &timeout)
+ }
+ tr := trFunc()
+ tr.Dial = dialFunc
+ tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return dialFunc(network, addr)
+ }
+ c.Dialer = &pipeDialer{dialFunc}
+ c.HTTPClient.Transport = tr
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
new file mode 100644
index 000000000..e24c9fb2e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -0,0 +1,1623 @@
+// Copyright 2013 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+)
+
+// ErrContainerAlreadyExists is the error returned by CreateContainer when the
+// container already exists.
+var ErrContainerAlreadyExists = errors.New("container already exists")
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See https://goo.gl/kaOHGw for more details.
+type ListContainersOptions struct {
+ All bool
+ Size bool
+ Limit int
+ Since string
+ Before string
+ Filters map[string][]string
+ Context context.Context
+}
+
+// APIPort is a type that represents a port mapping returned by the Docker API
+type APIPort struct {
+ PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" toml:"PrivatePort,omitempty"`
+ PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty" toml:"PublicPort,omitempty"`
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"`
+}
+
+// APIMount represents a mount point for a container.
+type APIMount struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"`
+ Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty" toml:"Destination,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"`
+ Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"`
+ RW bool `json:"RW,omitempty" yaml:"RW,omitempty" toml:"RW,omitempty"`
+ Propogation string `json:"Propogation,omitempty" yaml:"Propogation,omitempty" toml:"Propogation,omitempty"`
+}
+
+// APIContainers represents each container in the list returned by
+// ListContainers.
+type APIContainers struct {
+ ID string `json:"Id" yaml:"Id" toml:"Id"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+ Command string `json:"Command,omitempty" yaml:"Command,omitempty" toml:"Command,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+ State string `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"`
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+ Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty" toml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"`
+ Names []string `json:"Names,omitempty" yaml:"Names,omitempty" toml:"Names,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+ Networks NetworkList `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"`
+ Mounts []APIMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+}
+
+// NetworkList encapsulates a map of networks, as returned by the Docker API in
+// ListContainers.
+type NetworkList struct {
+ Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty" toml:"Networks,omitempty"`
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/kaOHGw for more details.
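+//
+// A sketch listing all containers, including stopped ones:
+//
+//	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, c := range containers {
+//		fmt.Println(c.ID, c.Status)
+//	}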
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var containers []APIContainers
+ if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
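+//
+// A short sketch of the accessors:
+//
+//	p := docker.Port("80/tcp")
+//	fmt.Println(p.Port())  // "80"
+//	fmt.Println(p.Proto()) // "tcp"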
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
+
+// HealthCheck represents one check of health.
+type HealthCheck struct {
+ Start time.Time `json:"Start,omitempty" yaml:"Start,omitempty" toml:"Start,omitempty"`
+ End time.Time `json:"End,omitempty" yaml:"End,omitempty" toml:"End,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+ Output string `json:"Output,omitempty" yaml:"Output,omitempty" toml:"Output,omitempty"`
+}
+
+// Health represents the health of a container.
+type Health struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+ FailingStreak int `json:"FailingStreak,omitempty" yaml:"FailingStreak,omitempty" toml:"FailingStreak,omitempty"`
+ Log []HealthCheck `json:"Log,omitempty" yaml:"Log,omitempty" toml:"Log,omitempty"`
+}
+
+// State represents the state of a container.
+type State struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty" toml:"Status,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"`
+ Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty" toml:"Paused,omitempty"`
+ Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty" toml:"Restarting,omitempty"`
+ OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty" toml:"OOMKilled,omitempty"`
+ RemovalInProgress bool `json:"RemovalInProgress,omitempty" yaml:"RemovalInProgress,omitempty" toml:"RemovalInProgress,omitempty"`
+ Dead bool `json:"Dead,omitempty" yaml:"Dead,omitempty" toml:"Dead,omitempty"`
+ Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty" toml:"Pid,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+ Error string `json:"Error,omitempty" yaml:"Error,omitempty" toml:"Error,omitempty"`
+ StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty" toml:"StartedAt,omitempty"`
+ FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty" toml:"FinishedAt,omitempty"`
+ Health Health `json:"Health,omitempty" yaml:"Health,omitempty" toml:"Health,omitempty"`
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+ if s.Running {
+ if s.Paused {
+ return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+ }
+ if s.Restarting {
+ return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+ }
+
+ return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+ }
+
+ if s.RemovalInProgress {
+ return "Removal In Progress"
+ }
+
+ if s.Dead {
+ return "Dead"
+ }
+
+ if s.StartedAt.IsZero() {
+ return "Created"
+ }
+
+ if s.FinishedAt.IsZero() {
+ return ""
+ }
+
+ return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+ if s.Running {
+ if s.Paused {
+ return "paused"
+ }
+ if s.Restarting {
+ return "restarting"
+ }
+ return "running"
+ }
+
+ if s.Dead {
+ return "dead"
+ }
+
+ if s.StartedAt.IsZero() {
+ return "created"
+ }
+
+ return "exited"
+}
+
+// PortBinding represents the host/container port mapping as returned in the
+// `docker inspect` JSON.
+type PortBinding struct {
+ HostIP string `json:"HostIp,omitempty" yaml:"HostIp,omitempty" toml:"HostIp,omitempty"`
+ HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty" toml:"HostPort,omitempty"`
+}
+
+// PortMapping represents a deprecated field in the `docker inspect` output,
+// and its value as found in NetworkSettings should always be nil
+type PortMapping map[string]string
+
+// ContainerNetwork represents the networking settings of a container per network.
+type ContainerNetwork struct {
+ Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"`
+}
+
+// NetworkSettings contains network-related information about a container
+type NetworkSettings struct {
+ Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty" toml:"Networks,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"`
+ Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty" toml:"Bridge,omitempty"`
+ PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty" toml:"PortMapping,omitempty"`
+ Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty" toml:"Ports,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"`
+ SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty" toml:"SandboxKey,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty" toml:"LinkLocalIPv6Address,omitempty"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty" toml:"LinkLocalIPv6PrefixLen,omitempty"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty" toml:"SecondaryIPAddresses,omitempty"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty" toml:"SecondaryIPv6Addresses,omitempty"`
+}
+
+// PortMappingAPI translates the port mappings as contained in NetworkSettings
+// into the format in which they would appear when returned by the API
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIP,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. Those are contained in HostConfig.
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty" toml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty" toml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty" toml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty" toml:"ExposedPorts,omitempty"`
+ PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty" toml:"PublishService,omitempty"`
+ StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty" toml:"StopSignal,omitempty"`
+ StopTimeout int `json:"StopTimeout,omitempty" yaml:"StopTimeout,omitempty" toml:"StopTimeout,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
+ Cmd []string `json:"Cmd" yaml:"Cmd" toml:"Cmd"`
+ Shell []string `json:"Shell,omitempty" yaml:"Shell,omitempty" toml:"Shell,omitempty"`
+ Healthcheck *HealthConfig `json:"Healthcheck,omitempty" yaml:"Healthcheck,omitempty" toml:"Healthcheck,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty" toml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint" toml:"Entrypoint"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty" toml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty" toml:"OnBuild,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
+ ArgsEscaped bool `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty" toml:"ArgsEscaped,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty" toml:"StdinOnce,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty" toml:"NetworkDisabled,omitempty"`
+
+ // This is no longer used and has been kept here for backward
+ // compatibility, please use HostConfig.VolumesFrom.
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"`
+}
+
+// HostMount represents a mount point in the container in HostConfig.
+//
+// It was added in version 1.25 of the Docker API.
+type HostMount struct {
+ Target string `json:"Target,omitempty" yaml:"Target,omitempty" toml:"Target,omitempty"`
+ Source string `json:"Source,omitempty" yaml:"Source,omitempty" toml:"Source,omitempty"`
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+ ReadOnly bool `json:"ReadOnly,omitempty" yaml:"ReadOnly,omitempty" toml:"ReadOnly,omitempty"`
+ BindOptions *BindOptions `json:"BindOptions,omitempty" yaml:"BindOptions,omitempty" toml:"BindOptions,omitempty"`
+ VolumeOptions *VolumeOptions `json:"VolumeOptions,omitempty" yaml:"VolumeOptions,omitempty" toml:"VolumeOptions,omitempty"`
+ TempfsOptions *TempfsOptions `json:"TempfsOptions,omitempty" yaml:"TempfsOptions,omitempty" toml:"TempfsOptions,omitempty"`
+}
+
+// BindOptions contains optional configuration for the bind type
+type BindOptions struct {
+ Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty" toml:"Propagation,omitempty"`
+}
+
+// VolumeOptions contains optional configuration for the volume type
+type VolumeOptions struct {
+ NoCopy bool `json:"NoCopy,omitempty" yaml:"NoCopy,omitempty" toml:"NoCopy,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+ DriverConfig VolumeDriverConfig `json:"DriverConfig,omitempty" yaml:"DriverConfig,omitempty" toml:"DriverConfig,omitempty"`
+}
+
+// TempfsOptions contains optional configuration for the tempfs type
+type TempfsOptions struct {
+ SizeBytes int64 `json:"SizeBytes,omitempty" yaml:"SizeBytes,omitempty" toml:"SizeBytes,omitempty"`
+ Mode int `json:"Mode,omitempty" yaml:"Mode,omitempty" toml:"Mode,omitempty"`
+}
+
+// VolumeDriverConfig holds a map of volume driver specific options
+type VolumeDriverConfig struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It was added in version 1.20 of the Docker API, available since
+// Docker 1.8.
+type Mount struct {
+ Name string
+ Source string
+ Destination string
+ Driver string
+ Mode string
+ RW bool
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+ Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations. This can help a lot in
+// system administration, e.g. when a user starts too many processes and
+// therefore makes the system unresponsive for other users.
+type ULimit struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty" toml:"Soft,omitempty"`
+ Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty" toml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on.
+type SwarmNode struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty" toml:"IP,omitempty"`
+ Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty" toml:"Addr,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty" toml:"CPUs,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+}
+
+// GraphDriver contains information about the GraphDriver used by the
+// container.
+type GraphDriver struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty" toml:"Data,omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature
+//
+// It was added in version 1.24 of the Docker API, available since
+// Docker 1.12.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:"Test,omitempty" yaml:"Test,omitempty" toml:"Test,omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:"Interval,omitempty" yaml:"Interval,omitempty" toml:"Interval,omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:"Timeout,omitempty" yaml:"Timeout,omitempty" toml:"Timeout,omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:"StartPeriod,omitempty" yaml:"StartPeriod,omitempty" toml:"StartPeriod,omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:"Retries,omitempty" yaml:"Retries,omitempty" toml:"Retries,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct {
+ ID string `json:"Id" yaml:"Id" toml:"Id"`
+
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"`
+
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+ State State `json:"State,omitempty" yaml:"State,omitempty" toml:"State,omitempty"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty" toml:"Image,omitempty"`
+
+ Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty" toml:"Node,omitempty"`
+
+ NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty" toml:"NetworkSettings,omitempty"`
+
+ SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty" toml:"SysInitPath,omitempty"`
+ ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty" toml:"ResolvConfPath,omitempty"`
+ HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty" toml:"HostnamePath,omitempty"`
+ HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty" toml:"HostsPath,omitempty"`
+ LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty" toml:"LogPath,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty" toml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty" toml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty" toml:"ExecIDs,omitempty"`
+ GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty" toml:"GraphDriver,omitempty"`
+
+ RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty" toml:"RestartCount,omitempty"`
+
+ AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty" toml:"AppArmorProfile,omitempty"`
+}
+
+// UpdateContainerOptions specify parameters to the UpdateContainer function.
+//
+// See https://goo.gl/Y6fXUy for more details.
+type UpdateContainerOptions struct {
+ BlkioWeight int `json:"BlkioWeight"`
+ CPUShares int `json:"CpuShares"`
+ CPUPeriod int `json:"CpuPeriod"`
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"`
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
+ CPUQuota int `json:"CpuQuota"`
+ CpusetCpus string `json:"CpusetCpus"`
+ CpusetMems string `json:"CpusetMems"`
+ Memory int `json:"Memory"`
+ MemorySwap int `json:"MemorySwap"`
+ MemoryReservation int `json:"MemoryReservation"`
+ KernelMemory int `json:"KernelMemory"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
+ Context context.Context
+}
+
+// UpdateContainer updates the container identified by id with the given options.
+//
+// See https://goo.gl/Y6fXUy for more details.
+func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{
+ data: opts,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
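+
+// exampleUpdateContainerLimits is an illustrative sketch, not part of the
+// upstream file: it shows one way a caller might raise a container's memory
+// limits via UpdateContainer. The container ID and sizes are placeholder
+// assumptions.
+func exampleUpdateContainerLimits(c *Client, id string) error {
+ // Raise the memory limit to 512 MiB; note that most fields of
+ // UpdateContainerOptions lack omitempty, so zero values are sent too.
+ return c.UpdateContainer(id, UpdateContainerOptions{
+ Memory: 512 * 1024 * 1024,
+ MemorySwap: 512 * 1024 * 1024,
+ })
+}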
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/46inai for more details.
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Context context.Context
+}
+
+// RenameContainer updates an existing container's name.
+//
+// See https://goo.gl/46inai for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ return c.inspectContainer(id, doOptions{})
+}
+
+// InspectContainerWithContext returns information about a container by its ID.
+// The context object can be used to cancel the inspect request.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
+ return c.inspectContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) {
+ path := "/containers/" + id + "/json"
+ resp, err := c.do("GET", path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://goo.gl/15KKzh for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var changes []Change
+ if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://goo.gl/tyzwVM for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig `qs:"-"`
+ NetworkingConfig *NetworkingConfig `qs:"-"`
+ Context context.Context
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// The returned container instance contains only the container ID. To get more
+// details about the container after creating it, use InspectContainer.
+//
+// See https://goo.gl/tyzwVM for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ resp, err := c.do(
+ "POST",
+ path,
+ doOptions{
+ data: struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
+ NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"`
+ }{
+ opts.Config,
+ opts.HostConfig,
+ opts.NetworkingConfig,
+ },
+ context: opts.Context,
+ },
+ )
+
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if e.Status == http.StatusConflict {
+ return nil, ErrContainerAlreadyExists
+ }
+ // Workaround for 17.09 bug returning 400 instead of 409.
+ // See https://github.com/moby/moby/issues/35021
+ if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") {
+ return nil, ErrContainerAlreadyExists
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
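+
+// exampleCreateThenInspect is an illustrative sketch, not part of the
+// upstream file. Because CreateContainer returns a Container carrying only
+// the ID (plus the name set by the caller), it follows up with
+// InspectContainer to obtain the full state. The image, command, and name
+// are placeholder assumptions.
+func exampleCreateThenInspect(c *Client) (*Container, error) {
+ created, err := c.CreateContainer(CreateContainerOptions{
+ Name: "example",
+ Config: &Config{Image: "busybox", Cmd: []string{"sleep", "60"}},
+ })
+ if err != nil {
+ return nil, err
+ }
+ return c.InspectContainer(created.ID)
+}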
+
+// KeyValuePair is a type for generic key/value pairs as used in the Lxc
+// configuration
+type KeyValuePair struct {
+ Key string `json:"Key,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+// - always: the docker daemon will always restart the container
+// - on-failure: the docker daemon will restart the container on failures, at
+// most MaximumRetryCount times
+// - unless-stopped: the docker daemon will always restart the container except
+// when the user has manually stopped the container
+// - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"`
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+ return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+ return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
+// always restart the container except when the user has manually stopped the container.
+func RestartUnlessStopped() RestartPolicy {
+ return RestartPolicy{Name: "unless-stopped"}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+ return RestartPolicy{Name: "no"}
+}
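+
+// exampleRestartPolicy is an illustrative sketch, not part of the upstream
+// file: it shows how the helper constructors above plug into a HostConfig,
+// here retrying a failing container at most three times.
+func exampleRestartPolicy() *HostConfig {
+ return &HostConfig{RestartPolicy: RestartOnFailure(3)}
+}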
+
+// Device represents a device mapping between the Docker host and the
+// container.
+type Device struct {
+ PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty" toml:"PathOnHost,omitempty"`
+ PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty" toml:"PathInContainer,omitempty"`
+ CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty" toml:"CgroupPermissions,omitempty"`
+}
+
+// BlockWeight represents a relative device weight for an individual device inside
+// of a container
+type BlockWeight struct {
+ Path string `json:"Path,omitempty"`
+ Weight string `json:"Weight,omitempty"`
+}
+
+// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device
+// inside of a container
+type BlockLimit struct {
+ Path string `json:"Path,omitempty"`
+ Rate int64 `json:"Rate,omitempty"`
+}
+
+// HostConfig contains the container options related to starting a container on
+// a given host
+type HostConfig struct {
+ Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty" toml:"Binds,omitempty"`
+ CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty" toml:"CapAdd,omitempty"`
+ CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty" toml:"CapDrop,omitempty"`
+ GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty" toml:"GroupAdd,omitempty"`
+ ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty" toml:"ContainerIDFile,omitempty"`
+ LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty" toml:"LxcConf,omitempty"`
+ PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty" toml:"PortBindings,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty" toml:"Dns,omitempty"` // For Docker API v1.10 and above only
+ DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty" toml:"DnsOptions,omitempty"`
+ DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty" toml:"DnsSearch,omitempty"`
+ ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty" toml:"ExtraHosts,omitempty"`
+ VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty" toml:"VolumesFrom,omitempty"`
+ UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty" toml:"UsernsMode,omitempty"`
+ NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty" toml:"NetworkMode,omitempty"`
+ IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty" toml:"IpcMode,omitempty"`
+ PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty" toml:"PidMode,omitempty"`
+ UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty" toml:"UTSMode,omitempty"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty" toml:"RestartPolicy,omitempty"`
+ Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"`
+ DeviceCgroupRules []string `json:"DeviceCgroupRules,omitempty" yaml:"DeviceCgroupRules,omitempty" toml:"DeviceCgroupRules,omitempty"`
+ LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty" toml:"LogConfig,omitempty"`
+ SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty" toml:"SecurityOpt,omitempty"`
+ Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty" toml:"Cgroup,omitempty"`
+ CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty" toml:"CgroupParent,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty" toml:"Memory,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty" toml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty" toml:"KernelMemory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty" toml:"MemorySwap,omitempty"`
+ MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty" toml:"MemorySwappiness,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty" toml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty" toml:"Cpuset,omitempty"`
+ CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty" toml:"CpusetCpus,omitempty"`
+ CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty" toml:"CpusetMems,omitempty"`
+ CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty" toml:"CpuQuota,omitempty"`
+ CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty" toml:"CpuPeriod,omitempty"`
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod,omitempty" yaml:"CpuRealtimePeriod,omitempty" toml:"CpuRealtimePeriod,omitempty"`
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime,omitempty" yaml:"CpuRealtimeRuntime,omitempty" toml:"CpuRealtimeRuntime,omitempty"`
+ BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty" toml:"BlkioWeight,omitempty"`
+ BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty" toml:"BlkioWeightDevice,omitempty"`
+ BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty" toml:"BlkioDeviceReadBps,omitempty"`
+ BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty" toml:"BlkioDeviceReadIOps,omitempty"`
+ BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty" toml:"BlkioDeviceWriteBps,omitempty"`
+ BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty" toml:"BlkioDeviceWriteIOps,omitempty"`
+ Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty" toml:"Ulimits,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty" toml:"VolumeDriver,omitempty"`
+ OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty" toml:"OomScoreAdj,omitempty"`
+ PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty" toml:"PidsLimit,omitempty"`
+ ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty" toml:"ShmSize,omitempty"`
+ Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty" toml:"Tmpfs,omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty" toml:"PublishAllPorts,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty" toml:"ReadonlyRootfs,omitempty"`
+ OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty" toml:"OomKillDisable,omitempty"`
+ AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"`
+ StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"`
+ Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"`
+ CPUCount int64 `json:"CpuCount,omitempty" yaml:"CpuCount,omitempty"`
+ CPUPercent int64 `json:"CpuPercent,omitempty" yaml:"CpuPercent,omitempty"`
+ IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"`
+ IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"`
+ Mounts []HostMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+ Init bool `json:",omitempty" yaml:",omitempty"`
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig" toml:"EndpointsConfig"` // Endpoint configs for each connecting network
+}
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ return c.startContainer(id, hostConfig, doOptions{})
+}
+
+// StartContainerWithContext starts a container, returning an error in case of
+// failure. The context can be used to cancel the outstanding start container
+// request.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
+func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error {
+ return c.startContainer(id, hostConfig, doOptions{context: ctx})
+}
+
+func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error {
+ path := "/containers/" + id + "/start"
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) {
+ opts.data = hostConfig
+ opts.forceJSON = true
+ }
+ resp, err := c.do("POST", path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id, Err: err}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerAlreadyRunning{ID: id}
+ }
+ return nil
+}
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See https://goo.gl/R9dZcV for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ return c.stopContainer(id, timeout, doOptions{})
+}
+
+// StopContainerWithContext stops a container, killing it after the given
+// timeout (in seconds). The context can be used to cancel the stop
+// container request.
+//
+// See https://goo.gl/R9dZcV for more details.
+func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
+ return c.stopContainer(id, timeout, doOptions{context: ctx})
+}
+
+func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ resp, err := c.do("POST", path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerNotRunning{ID: id}
+ }
+ return nil
+}
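+
+// exampleStopIfRunning is an illustrative sketch, not part of the upstream
+// file: it stops a container with a ten-second grace period and treats an
+// already-stopped container (ContainerNotRunning) as success.
+func exampleStopIfRunning(c *Client, id string) error {
+ err := c.StopContainer(id, 10)
+ if _, stopped := err.(*ContainerNotRunning); stopped {
+ return nil
+ }
+ return err
+}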
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds) during the stop process, and then starts it again.
+//
+// See https://goo.gl/MrAKQ5 for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PauseContainer pauses the given container.
+//
+// See https://goo.gl/D1Yaii for more details.
+func (c *Client) PauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/pause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://goo.gl/sZ2faO for more details.
+func (c *Client) UnpauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/unpause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://goo.gl/FLwpPl for more details.
+type TopResult struct {
+ Titles []string
+ Processes [][]string
+}
+
+// TopContainer returns processes running inside a container
+//
+// See https://goo.gl/FLwpPl for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ return result, err
+ }
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&result)
+ return result, err
+}
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/Dk3Xio for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"`
+ PreRead time.Time `json:"preread,omitempty" yaml:"preread,omitempty" toml:"preread,omitempty"`
+ NumProcs uint32 `json:"num_procs" yaml:"num_procs" toml:"num_procs"`
+ PidsStats struct {
+ Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
+ } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"`
+ Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"`
+ Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"`
+ HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"`
+ Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"`
+ } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"`
+ Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"`
+ Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"`
+ Commit uint64 `json:"commitbytes,omitempty" yaml:"commitbytes,omitempty" toml:"commitbytes,omitempty"`
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty" yaml:"commitpeakbytes,omitempty" toml:"commitpeakbytes,omitempty"`
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty" yaml:"privateworkingset,omitempty" toml:"privateworkingset,omitempty"`
+ } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"`
+ BlkioStats struct {
+ IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"`
+ IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"`
+ IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"`
+ IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"`
+ IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"`
+ IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"`
+ IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"`
+ } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+ StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty" yaml:"read_count_normalized,omitempty" toml:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty" yaml:"read_size_bytes,omitempty" toml:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty" yaml:"write_count_normalized,omitempty" toml:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty" yaml:"write_size_bytes,omitempty" toml:"write_size_bytes,omitempty"`
+ } `json:"storage_stats,omitempty" yaml:"storage_stats,omitempty" toml:"storage_stats,omitempty"`
+}
+
+// NetworkStats is a stats entry for network stats
+type NetworkStats struct {
+ RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"`
+ RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"`
+}
+
+// CPUStats is a stats entry for cpu stats
+type CPUStats struct {
+ CPUUsage struct {
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"`
+ UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"`
+ TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"`
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"`
+ } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"`
+ SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"`
+ OnlineCPUs uint64 `json:"online_cpus,omitempty" yaml:"online_cpus,omitempty" toml:"online_cpus,omitempty"`
+ ThrottlingData struct {
+ Periods uint64 `json:"periods,omitempty"`
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+ } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"`
+}
+
+// BlkioStatsEntry is a stats entry for blkio_stats
+type BlkioStatsEntry struct {
+ Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"`
+ Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"`
+ Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"`
+}
+
+// StatsOptions specify parameters to the Stats function.
+//
+// See https://goo.gl/Dk3Xio for more details.
+type StatsOptions struct {
+ ID string
+ Stats chan<- *Stats
+ Stream bool
+ // A flag that enables stopping the stats operation
+ Done <-chan bool
+ // Initial connection timeout
+ Timeout time.Duration
+ // Timeout when no data is received; it's reset every time new data
+ // arrives
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// Stats sends container statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should be run
+// on a separate goroutine from the caller. Note that this function will block until
+// the given container is removed, not just exited. When finished, this function
+// will close the given channel. Alternatively, the function can be stopped by
+// signaling on the Done channel.
+//
+// See https://goo.gl/Dk3Xio for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+ errC := make(chan error, 1)
+ readCloser, writeCloser := io.Pipe()
+
+ defer func() {
+ close(opts.Stats)
+
+ select {
+ case err := <-errC:
+ if err != nil && retErr == nil {
+ retErr = err
+ }
+ default:
+ // No errors
+ }
+
+ if err := readCloser.Close(); err != nil && retErr == nil {
+ retErr = err
+ }
+ }()
+
+ reqSent := make(chan struct{})
+ go func() {
+ err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ timeout: opts.Timeout,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ reqSent: reqSent,
+ })
+ if err != nil {
+ dockerError, ok := err.(*Error)
+ if ok {
+ if dockerError.Status == http.StatusNotFound {
+ err = &NoSuchContainer{ID: opts.ID}
+ }
+ }
+ }
+ if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ errC <- err
+ close(errC)
+ }()
+
+ quit := make(chan struct{})
+ defer close(quit)
+ go func() {
+ // block here waiting for the signal to stop function
+ select {
+ case <-opts.Done:
+ readCloser.Close()
+ case <-quit:
+ return
+ }
+ }()
+
+ decoder := json.NewDecoder(readCloser)
+ stats := new(Stats)
+ <-reqSent
+ for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+ if err != nil {
+ return err
+ }
+ opts.Stats <- stats
+ stats = new(Stats)
+ }
+ return nil
+}
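+
+// exampleOneShotStats is an illustrative sketch, not part of the upstream
+// file. Since Stats blocks and closes the stats channel when it returns, it
+// runs on its own goroutine; Stream is false so the daemon sends a single
+// sample and the range loop ends on its own.
+func exampleOneShotStats(c *Client, id string) error {
+ statsC := make(chan *Stats)
+ errC := make(chan error, 1)
+ go func() {
+ errC <- c.Stats(StatsOptions{ID: id, Stats: statsC, Stream: false})
+ }()
+ for s := range statsC {
+ fmt.Println("memory usage:", s.MemoryStats.Usage)
+ }
+ return <-errC
+}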
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/JnTxXZ for more details.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, the Docker server
+ // will assume SIGKILL.
+ Signal Signal
+ Context context.Context
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/JnTxXZ for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ e, ok := err.(*Error)
+ if !ok {
+ return err
+ }
+ switch e.Status {
+ case http.StatusNotFound:
+ return &NoSuchContainer{ID: opts.ID}
+ case http.StatusConflict:
+ return &ContainerNotRunning{ID: opts.ID}
+ default:
+ return err
+ }
+ }
+ resp.Body.Close()
+ return nil
+}
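+
+// exampleGracefulKill is an illustrative sketch, not part of the upstream
+// file: it sends SIGTERM (a Signal constant defined elsewhere in this
+// package) instead of the default SIGKILL, giving the process a chance to
+// shut down cleanly.
+func exampleGracefulKill(c *Client, id string) error {
+ return c.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM})
+}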
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/hL5IPC for more details.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // A flag that indicates whether Docker should remove the volumes
+ // associated with the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+ Context context.Context
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/hL5IPC for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/g25o7u for more details.
+type UploadToContainerOptions struct {
+ InputStream io.Reader `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+ Context context.Context
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/g25o7u for more details.
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("PUT", url, streamOptions{
+ in: opts.InputStream,
+ context: opts.Context,
+ })
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/W49jxK for more details.
+type DownloadFromContainerOptions struct {
+ OutputStream io.Writer `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/W49jxK for more details.
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
+
+// CopyFromContainerOptions contains the set of options used for copying
+// files from a container.
+//
+// Deprecated: Use DownloadFromContainerOptions and DownloadFromContainer instead.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+ Context context.Context `json:"-"`
+}
+
+// CopyFromContainer copies files from a container.
+//
+// Deprecated: Use DownloadFromContainer instead.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) {
+ return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ resp, err := c.do("POST", url, doOptions{
+ data: opts,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = io.Copy(opts.OutputStream, resp.Body)
+ return err
+}
+
+// WaitContainer blocks until the given container stops, returning the exit code
+// of the container status.
+//
+// See https://goo.gl/4AGweZ for more details.
+func (c *Client) WaitContainer(id string) (int, error) {
+ return c.waitContainer(id, doOptions{})
+}
+
+// WaitContainerWithContext blocks until the given container stops, returning
+// the exit code of the container status. The context object can be used to
+// cancel the wait request.
+//
+// See https://goo.gl/4AGweZ for more details.
+func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) {
+ return c.waitContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) waitContainer(id string, opts doOptions) (int, error) {
+ resp, err := c.do("POST", "/containers/"+id+"/wait", opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ return 0, err
+ }
+ defer resp.Body.Close()
+ var r struct{ StatusCode int }
+ if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
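+
+// exampleStartAndWait is an illustrative sketch, not part of the upstream
+// file: it starts a container and blocks until it exits, returning the
+// container's exit code.
+func exampleStartAndWait(c *Client, id string) (int, error) {
+ if err := c.StartContainer(id, nil); err != nil {
+ return 0, err
+ }
+ return c.WaitContainer(id)
+}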
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See https://goo.gl/CzIguf for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"comment"`
+ Author string
+ Changes []string `qs:"changes"`
+ Run *Config `qs:"-"`
+ Context context.Context
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See https://goo.gl/CzIguf for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Run,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var image Image
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
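+
+// exampleCommitSnapshot is an illustrative sketch, not part of the upstream
+// file: it commits a container's filesystem changes to a new tagged image.
+// The repository, tag, and message are placeholder assumptions.
+func exampleCommitSnapshot(c *Client, id string) (*Image, error) {
+ return c.CommitContainer(CommitContainerOptions{
+ Container: id,
+ Repository: "example/snapshot",
+ Tag: "v1",
+ Message: "snapshot after provisioning",
+ })
+}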
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See https://goo.gl/JF10Zk for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+ // Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputStream.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See https://goo.gl/JF10Zk for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ cw, err := c.AttachToContainerNonBlocking(opts)
+ if err != nil {
+ return err
+ }
+ return cw.Wait()
+}
+
+// AttachToContainerNonBlocking attaches to a container, using the given options.
+// This function does not block.
+//
+// See https://goo.gl/NKpkFk for more details.
+func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
+ if opts.Container == "" {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See https://goo.gl/krK0ZH for more details.
+type LogsOptions struct {
+ Context context.Context
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Tail string
+
+ Since int64
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Timestamps bool
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex
+// the streams and send the container's stdout to LogsOptions.OutputStream, and
+// stderr to LogsOptions.ErrorStream.
+//
+// When LogsOptions.RawTerminal is true, callers will get the raw stream on
+// LogsOptions.OutputStream. The caller can use libraries such as dlog
+// (github.com/ahmetalpbalkan/dlog).
+//
+// See https://goo.gl/krK0ZH for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+ return c.stream("GET", path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
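+
+// exampleCollectLogs is an illustrative sketch, not part of the upstream
+// file: with RawTerminal left false, the multiplexed stream is split so the
+// container's stdout and stderr land in separate buffers (strings.Builder,
+// available since Go 1.10, serves as the io.Writer). The tail size is an
+// arbitrary placeholder.
+func exampleCollectLogs(c *Client, id string) (string, string, error) {
+ var stdout, stderr strings.Builder
+ err := c.Logs(LogsOptions{
+ Container: id,
+ OutputStream: &stdout,
+ ErrorStream: &stderr,
+ Stdout: true,
+ Stderr: true,
+ Tail: "100",
+ })
+ return stdout.String(), stderr.String(), err
+}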
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/FImjeq for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/yGJCIh for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// ExportContainer exports the contents of the container with the given ID as
+// a tar archive, writing it to the given output stream.
+//
+// See https://goo.gl/yGJCIh for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
+
+// PruneContainersOptions specify parameters to the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
+type PruneContainersOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// PruneContainersResults specify results from the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
+type PruneContainersResults struct {
+ ContainersDeleted []string
+ SpaceReclaimed int64
+}
+
+// PruneContainers deletes containers which are stopped.
+//
+// See https://goo.gl/wnkgDT for more details.
+func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) {
+ path := "/containers/prune?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var results PruneContainersResults
+ if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+ return nil, err
+ }
+ return &results, nil
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchContainer) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such container: " + err.ID
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+ ID string
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+ return "Container already running: " + err.ID
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+ ID string
+}
+
+func (err *ContainerNotRunning) Error() string {
+ return "Container not running: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go
new file mode 100644
index 000000000..d0f8ce74c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go
@@ -0,0 +1,26 @@
+// Copyright 2017 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/registry"
+)
+
+// InspectDistribution returns image digest and platform information by contacting the registry
+func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) {
+ path := "/distribution/" + name + "/json"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var distributionInspect registry.DistributionInspect
+ if err := json.NewDecoder(resp.Body).Decode(&distributionInspect); err != nil {
+ return nil, err
+ }
+ return &distributionInspect, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go
new file mode 100644
index 000000000..13fedfb17
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/env.go
@@ -0,0 +1,172 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Env represents a list of key-value pairs represented in the form KEY=VALUE.
+type Env []string
+
+// Get returns the string value of the given key.
+func (env *Env) Get(key string) (value string) {
+ return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+ _, exists := env.Map()[key]
+ return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+ s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+ if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+ return false
+ }
+ return true
+}
+
+// SetBool defines a boolean value to the given key.
+func (env *Env) SetBool(key string, value bool) {
+ if value {
+ env.Set(key, "1")
+ } else {
+ env.Set(key, "0")
+ }
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+ return int(env.GetInt64(key))
+}
+
+// SetInt defines an integer value to the given key.
+func (env *Env) SetInt(key string, value int) {
+ env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+ s := strings.Trim(env.Get(key), " \t")
+ val, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return -1
+ }
+ return val
+}
+
+// SetInt64 defines an integer (64-bit wide) value to the given key.
+func (env *Env) SetInt64(key string, value int64) {
+ env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key in the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
+func (env *Env) SetJSON(key string, value interface{}) error {
+ sval, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ env.Set(key, string(sval))
+ return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key maps to a single string, it will return a list
+// containing only that value.
+func (env *Env) GetList(key string) []string {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ var l []string
+ if err := json.Unmarshal([]byte(sval), &l); err != nil {
+ l = append(l, sval)
+ }
+ return l
+}
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+ return env.SetJSON(key, value)
+}
+
+// Set defines the value of a key to the given string.
+func (env *Env) Set(key, value string) {
+ *env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
+func (env *Env) Decode(src io.Reader) error {
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ env.SetAuto(k, v)
+ }
+ return nil
+}
+
+// SetAuto chooses the Set* method to call based on the type of the given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+ if fval, ok := value.(float64); ok {
+ env.SetInt64(key, int64(fval))
+ } else if sval, ok := value.(string); ok {
+ env.Set(key, sval)
+ } else if val, err := json.Marshal(value); err == nil {
+ env.Set(key, string(val))
+ } else {
+ env.Set(key, fmt.Sprintf("%v", value))
+ }
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+ if len(*env) == 0 {
+ return nil
+ }
+ m := make(map[string]string)
+ for _, kv := range *env {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) == 1 {
+ m[parts[0]] = ""
+ } else {
+ m[parts[0]] = parts[1]
+ }
+ }
+ return m
+}
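+
+// exampleEnvRoundTrip is an illustrative sketch, not part of the upstream
+// file: it exercises the typed setters and getters above on an Env list.
+func exampleEnvRoundTrip() {
+ var env Env
+ env.Set("PATH", "/usr/bin")
+ env.SetInt("RETRIES", 3)
+ env.SetBool("DEBUG", true)
+ // Map flattens the KEY=VALUE list into a lookup table.
+ fmt.Println(env.Get("PATH"), env.GetInt("RETRIES"), env.GetBool("DEBUG"), env.Map())
+}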
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
new file mode 100644
index 000000000..18ae5d5a6
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -0,0 +1,410 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// APIEvents represents events coming from the Docker API.
+// The fields in the Docker API changed in API version 1.22, and
+// events are now fired off for more than just images and containers.
+// To maintain forward and backward compatibility, go-dockerclient
+// replicates each event in both the new and the old format as faithfully as
+// possible.
+//
+// For events that only exist in 1.22 and later, `Status` is filled in as
+// `"Type:Action"` instead of just `Action`, so that older clients can
+// differentiate and not break if they rely on the pre-1.22 Status values.
+//
+// The transformEvent method can be consulted for more information about how
+// events are translated between the new and old API formats.
+type APIEvents struct {
+ // New API Fields in 1.22
+ Action string `json:"action,omitempty"`
+ Type string `json:"type,omitempty"`
+ Actor APIActor `json:"actor,omitempty"`
+
+ // Old API fields for < 1.22
+ Status string `json:"status,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+
+ // Fields in both
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
+
+// APIActor represents an actor that accomplishes something for an event
+type APIActor struct {
+ ID string `json:"id,omitempty"`
+ Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type eventMonitoringState struct {
+ // `sync/atomic` expects the first word in an allocated struct to be 64-bit
+ // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+ lastSeen int64
+ sync.RWMutex
+ sync.WaitGroup
+ enabled bool
+ C chan *APIEvents
+ errC chan error
+ listeners []chan<- *APIEvents
+}
+
+const (
+ maxMonitorConnRetries = 5
+ retryInitialWaitTime = 10.
+)
+
+var (
+ // ErrNoListeners is the error returned when no listeners are available
+ // to receive an event.
+ ErrNoListeners = errors.New("no listeners present to receive event")
+
+ // ErrListenerAlreadyExists is the error returned when the listener already
+ // exists.
+ ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+ // ErrTLSNotSupported is the error returned when the client does not support
+ // TLS (this applies to the Windows named pipe client).
+ ErrTLSNotSupported = errors.New("tls not supported by this client")
+
+ // EOFEvent is sent when the event listener receives an EOF error.
+ EOFEvent = &APIEvents{
+ Type: "EOF",
+ Status: "EOF",
+ }
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+ var err error
+ if !c.eventMonitor.isEnabled() {
+ err = c.eventMonitor.enableEventMonitoring(c)
+ if err != nil {
+ return err
+ }
+ }
+ return c.eventMonitor.addListener(listener)
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+ err := c.eventMonitor.removeListener(listener)
+ if err != nil {
+ return err
+ }
+ if c.eventMonitor.listenersCount() == 0 {
+ c.eventMonitor.disableEventMonitoring()
+ }
+ return nil
+}
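+
+// Illustrative usage (a sketch, not from the upstream docs; assumes a
+// connected *Client c):
+//
+//	events := make(chan *APIEvents, 16)
+//	if err := c.AddEventListener(events); err != nil {
+//		log.Fatal(err)
+//	}
+//	defer c.RemoveEventListener(events)
+//	for ev := range events {
+//		log.Printf("%s %s %s", ev.Type, ev.Action, ev.Actor.ID)
+//	}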
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ return ErrListenerAlreadyExists
+ }
+ eventState.Add(1)
+ eventState.listeners = append(eventState.listeners, listener)
+ return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ var newListeners []chan<- *APIEvents
+ for _, l := range eventState.listeners {
+ if l != listener {
+ newListeners = append(newListeners, l)
+ }
+ }
+ eventState.listeners = newListeners
+ eventState.Add(-1)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+ for _, l := range eventState.listeners {
+ close(l)
+ eventState.Add(-1)
+ }
+ eventState.listeners = nil
+}
+
+func (eventState *eventMonitoringState) listenersCount() int {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners)
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+ for _, b := range *list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if !eventState.enabled {
+ eventState.enabled = true
+ atomic.StoreInt64(&eventState.lastSeen, 0)
+ eventState.C = make(chan *APIEvents, 100)
+ eventState.errC = make(chan error, 1)
+ go eventState.monitorEvents(c)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+ eventState.Lock()
+ defer eventState.Unlock()
+
+ eventState.closeListeners()
+
+ eventState.Wait()
+
+ if eventState.enabled {
+ eventState.enabled = false
+ close(eventState.C)
+ close(eventState.errC)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+ const (
+ noListenersTimeout = 5 * time.Second
+ noListenersInterval = 10 * time.Millisecond
+ noListenersMaxTries = noListenersTimeout / noListenersInterval
+ )
+
+ var err error
+ for i := time.Duration(0); i < noListenersMaxTries && eventState.noListeners(); i++ {
+ time.Sleep(noListenersInterval)
+ }
+
+ if eventState.noListeners() {
+ // terminate if no listener is available after 5 seconds.
+ // Prevents goroutine leak when RemoveEventListener is called
+ // right after AddEventListener.
+ eventState.disableEventMonitoring()
+ return
+ }
+
+ if err = eventState.connectWithRetry(c); err != nil {
+ // terminate if connect failed
+ eventState.disableEventMonitoring()
+ return
+ }
+ for eventState.isEnabled() {
+ timeout := time.After(100 * time.Millisecond)
+ select {
+ case ev, ok := <-eventState.C:
+ if !ok {
+ return
+ }
+ if ev == EOFEvent {
+ eventState.disableEventMonitoring()
+ return
+ }
+ eventState.updateLastSeen(ev)
+ eventState.sendEvent(ev)
+ case err = <-eventState.errC:
+ if err == ErrNoListeners {
+ eventState.disableEventMonitoring()
+ return
+ } else if err != nil {
+ defer func() { go eventState.monitorEvents(c) }()
+ return
+ }
+ case <-timeout:
+ continue
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
+ var retries int
+ eventState.RLock()
+ eventChan := eventState.C
+ errChan := eventState.errC
+ eventState.RUnlock()
+ err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
+ for ; err != nil && retries < maxMonitorConnRetries; retries++ {
+ waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+ time.Sleep(time.Duration(waitTime) * time.Millisecond)
+ eventState.RLock()
+ eventChan = eventState.C
+ errChan = eventState.errC
+ eventState.RUnlock()
+ err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
+ }
+ return err
+}
+
+func (eventState *eventMonitoringState) noListeners() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners) == 0
+}
+
+func (eventState *eventMonitoringState) isEnabled() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return eventState.enabled
+}
+
+func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ eventState.Add(1)
+ defer eventState.Done()
+ if eventState.enabled {
+ if len(eventState.listeners) == 0 {
+ eventState.errC <- ErrNoListeners
+ return
+ }
+
+ for _, listener := range eventState.listeners {
+ select {
+ case listener <- event:
+ default:
+ }
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if atomic.LoadInt64(&eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(&eventState.lastSeen, e.Time)
+ }
+}
+
+func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
+ uri := "/events"
+ if startTime != 0 {
+ uri += fmt.Sprintf("?since=%d", startTime)
+ }
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" && protocol != "npipe" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ var err error
+ if c.TLSConfig == nil {
+ dial, err = c.Dialer.Dial(protocol, address)
+ } else {
+ netDialer, ok := c.Dialer.(*net.Dialer)
+ if !ok {
+ return ErrTLSNotSupported
+ }
+ dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig)
+ }
+ if err != nil {
+ return err
+ }
+ conn := httputil.NewClientConn(dial, nil)
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := conn.Do(req)
+ if err != nil {
+ return err
+ }
+ go func(res *http.Response, conn *httputil.ClientConn) {
+ defer conn.Close()
+ defer res.Body.Close()
+ decoder := json.NewDecoder(res.Body)
+ for {
+ var event APIEvents
+ if err = decoder.Decode(&event); err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ c.eventMonitor.RLock()
+ if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
+ // Signal that we're exiting.
+ eventChan <- EOFEvent
+ }
+ c.eventMonitor.RUnlock()
+ break
+ }
+ errChan <- err
+ }
+ if event.Time == 0 {
+ continue
+ }
+ transformEvent(&event)
+ c.eventMonitor.RLock()
+ if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
+ eventChan <- &event
+ }
+ c.eventMonitor.RUnlock()
+ }
+ }(res, conn)
+ return nil
+}
+
+// transformEvent takes an event, determines which API version it came from,
+// and then populates both versions of the event.
+func transformEvent(event *APIEvents) {
+ // if event version is <= 1.21 there will be no Action and no Type
+ if event.Action == "" && event.Type == "" {
+ event.Action = event.Status
+ event.Actor.ID = event.ID
+ event.Actor.Attributes = map[string]string{}
+ switch event.Status {
+ case "delete", "import", "pull", "push", "tag", "untag":
+ event.Type = "image"
+ default:
+ event.Type = "container"
+ if event.From != "" {
+ event.Actor.Attributes["image"] = event.From
+ }
+ }
+ } else {
+ if event.Status == "" {
+ if event.Type == "image" || event.Type == "container" {
+ event.Status = event.Action
+ } else {
+ // Because the Status field has been overloaded with different Types,
+ // if an event is not for an image or a container we prepend the type
+ // to avoid breaking people who rely on Status being used only for
+ // images and containers.
+ event.Status = event.Type + ":" + event.Action
+ }
+ }
+ if event.ID == "" {
+ event.ID = event.Actor.ID
+ }
+ if event.From == "" {
+ event.From = event.Actor.Attributes["image"]
+ }
+ }
+}
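+
+// Illustrative transformation (sketch): a pre-1.22 event such as
+// {Status: "pull", ID: "busybox"} is populated as
+// {Status: "pull", Action: "pull", Type: "image", Actor: {ID: "busybox"}},
+// so both old and new consumers see a complete event.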
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
new file mode 100644
index 000000000..3b875fa3c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -0,0 +1,213 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// Exec is the type representing a `docker exec` instance and containing the
+// instance ID
+type Exec struct {
+ ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
+}
+
+// CreateExecOptions specify parameters to the CreateExecContainer function.
+//
+// See https://goo.gl/60TeBP for more details
+type CreateExecOptions struct {
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty" toml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
+ Context context.Context `json:"-"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty" toml:"Privileged,omitempty"`
+}
+
+// CreateExec sets up an exec instance in a running container `id`, returning the exec
+// instance, or an error in case of failure.
+//
+// See https://goo.gl/60TeBP for more details
+func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
+ if len(opts.Env) > 0 && c.serverAPIVersion.LessThan(apiVersion125) {
+ return nil, errors.New("exec configuration Env is only supported in API#1.25 and above")
+ }
+ path := fmt.Sprintf("/containers/%s/exec", opts.Container)
+ resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec Exec
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+
+ return &exec, nil
+}
+
+// StartExecOptions specify parameters to the StartExecContainer function.
+//
+// See https://goo.gl/1EeDWi for more details
+type StartExecOptions struct {
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty" toml:"Detach,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"`
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{} `json:"-"`
+
+ Context context.Context `json:"-"`
+}
+
+// StartExec starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/1EeDWi for more details
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+ cw, err := c.StartExecNonBlocking(id, opts)
+ if err != nil {
+ return err
+ }
+ if cw != nil {
+ return cw.Wait()
+ }
+ return nil
+}
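+
+// Illustrative usage (a sketch; the container name and command are
+// placeholders, and c is assumed to be a connected *Client):
+//
+//	exec, err := c.CreateExec(CreateExecOptions{
+//		Container:    "mycontainer",
+//		Cmd:          []string{"ls", "-l"},
+//		AttachStdout: true,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	err = c.StartExec(exec.ID, StartExecOptions{OutputStream: os.Stdout})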
+
+// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/1EeDWi for more details
+func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
+ if id == "" {
+ return nil, &NoSuchExec{ID: id}
+ }
+
+ path := fmt.Sprintf("/exec/%s/start", id)
+
+ if opts.Detach {
+ resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ return nil, nil
+ }
+
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ data: opts,
+ })
+}
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/Mo5bxx for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+
+ path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExecProcessConfig is a type describing the command associated to a Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+ User string `json:"user,omitempty" yaml:"user,omitempty" toml:"user,omitempty"`
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty" toml:"privileged,omitempty"`
+ Tty bool `json:"tty,omitempty" yaml:"tty,omitempty" toml:"tty,omitempty"`
+ EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty" toml:"entrypoint,omitempty"`
+ Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty" toml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json
+//
+// See https://goo.gl/ctMUiW for more details
+type ExecInspect struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty" toml:"ID,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty" toml:"ExitCode,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty" toml:"Running,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty" toml:"OpenStdin,omitempty"`
+ OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty" toml:"OpenStderr,omitempty"`
+ OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty" toml:"OpenStdout,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty" toml:"ProcessConfig,omitempty"`
+ ContainerID string `json:"ContainerID,omitempty" yaml:"ContainerID,omitempty" toml:"ContainerID,omitempty"`
+ DetachKeys string `json:"DetachKeys,omitempty" yaml:"DetachKeys,omitempty" toml:"DetachKeys,omitempty"`
+ CanRemove bool `json:"CanRemove,omitempty" yaml:"CanRemove,omitempty" toml:"CanRemove,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+//
+// See https://goo.gl/ctMUiW for more details
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+ path := fmt.Sprintf("/exec/%s/json", id)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec ExecInspect
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+ return &exec, nil
+}
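+
+// Illustrative usage (sketch): after the exec finishes, its exit status can
+// be read back:
+//
+//	inspect, err := c.InspectExec(exec.ID)
+//	if err == nil && !inspect.Running {
+//		log.Printf("exec exited with code %d", inspect.ExitCode)
+//	}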
+
+// NoSuchExec is the error returned when a given exec instance does not exist.
+type NoSuchExec struct {
+ ID string
+}
+
+func (err *NoSuchExec) Error() string {
+ return "No such exec instance: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go
new file mode 100644
index 000000000..124e78da3
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/image.go
@@ -0,0 +1,720 @@
+// Copyright 2013 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+)
+
+// APIImages represents an image returned by the ListImages call.
+type APIImages struct {
+ ID string `json:"Id" yaml:"Id" toml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"`
+ ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty" toml:"ParentId,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+}
+
+// RootFS represents the underlying layers used by an image
+type RootFS struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+ Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty" toml:"Layers,omitempty"`
+}
+
+// Image is the type representing a docker image and its various properties
+type Image struct {
+ ID string `json:"Id" yaml:"Id" toml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty" toml:"RepoTags,omitempty"`
+ Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty" toml:"Parent,omitempty"`
+ Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty" toml:"Comment,omitempty"`
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"`
+ ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty" toml:"ContainerConfig,omitempty"`
+ DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty" toml:"DockerVersion,omitempty"`
+ Author string `json:"Author,omitempty" yaml:"Author,omitempty" toml:"Author,omitempty"`
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+ Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty" toml:"VirtualSize,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty" toml:"RepoDigests,omitempty"`
+ RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty" toml:"RootFS,omitempty"`
+ OS string `json:"Os,omitempty" yaml:"Os,omitempty" toml:"Os,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific)
+type ImagePre012 struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+var (
+ // ErrNoSuchImage is the error returned when the image does not exist.
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrMissingRepo is the error returned when the remote repository is
+ // missing.
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+ // ErrMissingOutputStream is the error returned when no output stream
+ // is provided to some calls, like BuildImage.
+ ErrMissingOutputStream = errors.New("missing output stream")
+
+ // ErrMultipleContexts is the error returned when both a ContextDir and
+ // InputStream are provided in BuildImageOptions
+ ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
+
+ // ErrMustSpecifyNames is the error returned when the Names field on
+ // ExportImagesOptions is nil or empty.
+ ErrMustSpecifyNames = errors.New("must specify at least one name to export")
+)
+
+// ListImagesOptions specify parameters to the ListImages function.
+//
+// See https://goo.gl/BVzauZ for more details.
+type ListImagesOptions struct {
+ Filters map[string][]string
+ All bool
+ Digests bool
+ Filter string
+ Context context.Context
+}
+
+// ListImages returns the list of available images in the server.
+//
+// See https://goo.gl/BVzauZ for more details.
+func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
+ path := "/images/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var images []APIImages
+ if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
+ return nil, err
+ }
+ return images, nil
+}
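+
+// Illustrative usage (sketch; assumes a connected *Client c):
+//
+//	images, err := c.ListImages(ListImagesOptions{All: false})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, img := range images {
+//		fmt.Println(img.ID, img.RepoTags)
+//	}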
+
+// ImageHistory represents a layer in an image's history, as returned by the
+// ImageHistory call.
+type ImageHistory struct {
+ ID string `json:"Id" yaml:"Id" toml:"Id"`
+ Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty" toml:"Tags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty" toml:"Created,omitempty"`
+ CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty" toml:"CreatedBy,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty" toml:"Size,omitempty"`
+}
+
+// ImageHistory returns the history of the image by its name or ID.
+//
+// See https://goo.gl/fYtxQa for more details.
+func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
+ resp, err := c.do("GET", "/images/"+name+"/history", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var history []ImageHistory
+ if err := json.NewDecoder(resp.Body).Decode(&history); err != nil {
+ return nil, err
+ }
+ return history, nil
+}
+
+// RemoveImage removes an image by its name or ID.
+//
+// See https://goo.gl/Vd2Pck for more details.
+func (c *Client) RemoveImage(name string) error {
+ resp, err := c.do("DELETE", "/images/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RemoveImageOptions present the set of options available for removing an image
+// from a registry.
+//
+// See https://goo.gl/Vd2Pck for more details.
+type RemoveImageOptions struct {
+ Force bool `qs:"force"`
+ NoPrune bool `qs:"noprune"`
+ Context context.Context
+}
+
+// RemoveImageExtended removes an image by its name or ID. Extra parameters
+// can be passed; see RemoveImageOptions.
+//
+// See https://goo.gl/Vd2Pck for more details.
+func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
+ uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
+ resp, err := c.do("DELETE", uri, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectImage returns an image by its name or ID.
+//
+// See https://goo.gl/ncLTG8 for more details.
+func (c *Client) InspectImage(name string) (*Image, error) {
+ resp, err := c.do("GET", "/images/"+name+"/json", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var image Image
+
+ // if the caller elected to skip checking the server's version, assume it's the latest
+ if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) {
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ } else {
+ var imagePre012 ImagePre012
+ if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil {
+ return nil, err
+ }
+
+ image.ID = imagePre012.ID
+ image.Parent = imagePre012.Parent
+ image.Comment = imagePre012.Comment
+ image.Created = imagePre012.Created
+ image.Container = imagePre012.Container
+ image.ContainerConfig = imagePre012.ContainerConfig
+ image.DockerVersion = imagePre012.DockerVersion
+ image.Author = imagePre012.Author
+ image.Config = imagePre012.Config
+ image.Architecture = imagePre012.Architecture
+ image.Size = imagePre012.Size
+ }
+
+ return &image, nil
+}
+
+// PushImageOptions represents options to use in the PushImage method.
+//
+// See https://goo.gl/BZemGg for more details.
+type PushImageOptions struct {
+ // Name of the image
+ Name string
+
+ // Tag of the image
+ Tag string
+
+ // Registry server to push the image
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+
+ Context context.Context
+}
+
+// PushImage pushes an image to a remote registry, logging progress to w.
+//
+// An empty instance of AuthConfiguration may be used for unauthenticated
+// pushes.
+//
+// See https://goo.gl/BZemGg for more details.
+func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
+ if opts.Name == "" {
+ return ErrNoSuchImage
+ }
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ name := opts.Name
+ opts.Name = ""
+ path := "/images/" + name + "/push?" + queryString(&opts)
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
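+
+// Illustrative usage (a sketch; the registry and repository names are
+// placeholders):
+//
+//	err := c.PushImage(PushImageOptions{
+//		Name:         "localhost:5000/myrepo/myimage",
+//		Tag:          "latest",
+//		OutputStream: os.Stdout,
+//	}, AuthConfiguration{})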
+
+// PullImageOptions present the set of options available for pulling an image
+// from a registry.
+//
+// See https://goo.gl/qkoSsn for more details.
+type PullImageOptions struct {
+ Repository string `qs:"fromImage"`
+ Tag string
+
+ // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21
+ // and Docker Engine < 1.9
+ // This parameter was removed in Docker Engine 1.11
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// PullImage pulls an image from a remote registry, logging progress to
+// opts.OutputStream.
+//
+// See https://goo.gl/qkoSsn for more details.
+func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ if opts.Tag == "" && strings.Contains(opts.Repository, "@") {
+ parts := strings.SplitN(opts.Repository, "@", 2)
+ opts.Repository = parts[0]
+ opts.Tag = parts[1]
+ }
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
+}
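+
+// Illustrative usage (sketch):
+//
+//	err := c.PullImage(PullImageOptions{
+//		Repository:   "busybox",
+//		Tag:          "latest",
+//		OutputStream: os.Stdout,
+//	}, AuthConfiguration{})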
+
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error {
+ path := "/images/create?" + qs
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ headers: headers,
+ in: in,
+ stdout: w,
+ rawJSONStream: rawJSONStream,
+ inactivityTimeout: timeout,
+ context: context,
+ })
+}
+
+// LoadImageOptions represents the options for LoadImage Docker API Call
+//
+// See https://goo.gl/rEsBV3 for more details.
+type LoadImageOptions struct {
+ InputStream io.Reader
+ OutputStream io.Writer
+ Context context.Context
+}
+
+// LoadImage imports a Docker image from a tarball.
+//
+// See https://goo.gl/rEsBV3 for more details.
+func (c *Client) LoadImage(opts LoadImageOptions) error {
+ return c.stream("POST", "/images/load", streamOptions{
+ setRawTerminal: true,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ context: opts.Context,
+ })
+}
+
+// ExportImageOptions represent the options for ExportImage Docker API call.
+//
+// See https://goo.gl/AuySaA for more details.
+type ExportImageOptions struct {
+ Name string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration
+ Context context.Context
+}
+
+// ExportImage exports an image (as a tar file) into the stream.
+//
+// See https://goo.gl/AuySaA for more details.
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+ return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
+
+// ExportImagesOptions represent the options for ExportImages Docker API call
+//
+// See https://goo.gl/N9XlDn for more details.
+type ExportImagesOptions struct {
+ Names []string
+ OutputStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream
+//
+// See https://goo.gl/N9XlDn for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+ if len(opts.Names) == 0 {
+ return ErrMustSpecifyNames
+ }
+ return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// ImportImageOptions present the set of options available for importing
+// an image from a source file or stdin.
+//
+// See https://goo.gl/qkoSsn for more details.
+type ImportImageOptions struct {
+ Repository string `qs:"repo"`
+ Source string `qs:"fromSrc"`
+ Tag string `qs:"tag"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// ImportImage imports an image from a URL, a file, or stdin.
+//
+// See https://goo.gl/qkoSsn for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+ if opts.Source != "-" {
+ opts.InputStream = nil
+ }
+ if opts.Source != "-" && !isURL(opts.Source) {
+ f, err := os.Open(opts.Source)
+ if err != nil {
+ return err
+ }
+ opts.InputStream = f
+ opts.Source = "-"
+ }
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
+}
+
+// BuildImageOptions present the set of options available for building an
+// image from a tarball that contains a Dockerfile.
+//
+// For more details about the Docker building process, see
+// https://goo.gl/4nYHwV.
+type BuildImageOptions struct {
+ Name string `qs:"t"`
+ Dockerfile string `qs:"dockerfile"`
+ NoCache bool `qs:"nocache"`
+ CacheFrom []string `qs:"-"`
+ SuppressOutput bool `qs:"q"`
+ Pull bool `qs:"pull"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm"`
+ RawJSONStream bool `qs:"-"`
+ Memory int64 `qs:"memory"`
+ Memswap int64 `qs:"memswap"`
+ CPUShares int64 `qs:"cpushares"`
+ CPUQuota int64 `qs:"cpuquota"`
+ CPUPeriod int64 `qs:"cpuperiod"`
+ CPUSetCPUs string `qs:"cpusetcpus"`
+ Labels map[string]string `qs:"labels"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ Remote string `qs:"remote"`
+ Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
+ AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+ ContextDir string `qs:"-"`
+ Ulimits []ULimit `qs:"-"`
+ BuildArgs []BuildArg `qs:"-"`
+ NetworkMode string `qs:"networkmode"`
+ InactivityTimeout time.Duration `qs:"-"`
+ CgroupParent string `qs:"cgroupparent"`
+ SecurityOpt []string `qs:"securityopt"`
+ Target string `qs:"target"`
+ Context context.Context
+}
+
+// BuildArg represents arguments that can be passed to the image when building
+// it from a Dockerfile.
+//
+// For more details about the Docker building process, see
+// https://goo.gl/4nYHwV.
+type BuildArg struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// BuildImage builds an image from a tarball's url or a Dockerfile in the input
+// stream.
+//
+// See https://goo.gl/4nYHwV for more details.
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+ if opts.OutputStream == nil {
+ return ErrMissingOutputStream
+ }
+ headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs))
+ if err != nil {
+ return err
+ }
+
+ if opts.Remote != "" && opts.Name == "" {
+ opts.Name = opts.Remote
+ }
+ if opts.InputStream != nil || opts.ContextDir != "" {
+ headers["Content-Type"] = "application/tar"
+ } else if opts.Remote == "" {
+ return ErrMissingRepo
+ }
+ if opts.ContextDir != "" {
+ if opts.InputStream != nil {
+ return ErrMultipleContexts
+ }
+ var err error
+ if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
+ return err
+ }
+ }
+ qs := queryString(&opts)
+
+ if c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion125) && len(opts.CacheFrom) > 0 {
+ if b, err := json.Marshal(opts.CacheFrom); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("cachefrom", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ if len(opts.Ulimits) > 0 {
+ if b, err := json.Marshal(opts.Ulimits); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("ulimits", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ if len(opts.BuildArgs) > 0 {
+ v := make(map[string]string)
+ for _, arg := range opts.BuildArgs {
+ v[arg.Name] = arg.Value
+ }
+ if b, err := json.Marshal(v); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("buildargs", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
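+
+// Illustrative usage (a sketch; the context directory is a placeholder and
+// must contain a Dockerfile):
+//
+//	err := c.BuildImage(BuildImageOptions{
+//		Name:         "myimage:latest",
+//		ContextDir:   "/path/to/context",
+//		OutputStream: os.Stdout,
+//	})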
+
+func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} {
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) {
+ return AuthConfigurations119(authConfigs.Configs)
+ }
+ return authConfigs
+}
+
+// TagImageOptions present the set of options to tag an image.
+//
+// See https://goo.gl/prHrvo for more details.
+type TagImageOptions struct {
+ Repo string
+ Tag string
+ Force bool
+ Context context.Context
+}
+
+// TagImage adds a tag to the image identified by the given name.
+//
+// See https://goo.gl/prHrvo for more details.
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+ if name == "" {
+ return ErrNoSuchImage
+ }
+ resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{
+ context: opts.Context,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+
+ return err
+}
+
+func isURL(u string) bool {
+ p, err := url.Parse(u)
+ if err != nil {
+ return false
+ }
+ return p.Scheme == "http" || p.Scheme == "https"
+}
+
+func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+ var headers = make(map[string]string)
+
+ for _, auth := range auths {
+ switch auth.(type) {
+ case AuthConfiguration:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ case AuthConfigurations, AuthConfigurations119:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ }
+ }
+
+ return headers, nil
+}
+
+// APIImageSearch reflects the result of a search on the Docker Hub.
+//
+// See https://goo.gl/KLO9IZ for more details.
+type APIImageSearch struct {
+ Description string `json:"description,omitempty" yaml:"description,omitempty" toml:"description,omitempty"`
+ IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty" toml:"is_official,omitempty"`
+ IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty" toml:"is_automated,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty" toml:"name,omitempty"`
+ StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty" toml:"star_count,omitempty"`
+}
+
+// SearchImages searches the Docker Hub for images matching the given term.
+//
+// See https://goo.gl/KLO9IZ for more details.
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+ return searchResult, nil
+}
+
+// SearchImagesEx searches the Docker Hub for images matching the given term,
+// using the provided authentication credentials.
+//
+// See https://goo.gl/KLO9IZ for more details.
+func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) {
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{
+ headers: headers,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ defer resp.Body.Close()
+
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+
+ return searchResult, nil
+}
+
+// PruneImagesOptions specify parameters to the PruneImages function.
+//
+// See https://goo.gl/qfZlbZ for more details.
+type PruneImagesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// PruneImagesResults specify results from the PruneImages function.
+//
+// See https://goo.gl/qfZlbZ for more details.
+type PruneImagesResults struct {
+ ImagesDeleted []struct{ Untagged, Deleted string }
+ SpaceReclaimed int64
+}
+
+// PruneImages deletes images which are unused.
+//
+// See https://goo.gl/qfZlbZ for more details.
+func (c *Client) PruneImages(opts PruneImagesOptions) (*PruneImagesResults, error) {
+ path := "/images/prune?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var results PruneImagesResults
+ if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+ return nil, err
+ }
+ return &results, nil
+}
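+
+// Illustrative usage (sketch): prune only dangling images:
+//
+//	results, err := c.PruneImages(PruneImagesOptions{
+//		Filters: map[string][]string{"dangling": {"true"}},
+//	})
+//	if err == nil {
+//		log.Printf("reclaimed %d bytes", results.SpaceReclaimed)
+//	}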
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
new file mode 100644
index 000000000..1fc37b14e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -0,0 +1,188 @@
+// Copyright 2013 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+ "strings"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// Version returns version information about the docker server.
+//
+// See https://goo.gl/mU7yje for more details.
+func (c *Client) Version() (*Env, error) {
+ return c.VersionWithContext(nil)
+}
+
+// VersionWithContext returns version information about the docker server.
+func (c *Client) VersionWithContext(ctx context.Context) (*Env, error) {
+ resp, err := c.do("GET", "/version", doOptions{context: ctx})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var env Env
+ if err := env.Decode(resp.Body); err != nil {
+ return nil, err
+ }
+ return &env, nil
+}
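+
+// Illustrative usage (sketch):
+//
+//	env, err := c.Version()
+//	if err == nil {
+//		fmt.Println("server version:", env.Get("Version"))
+//	}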
+
+// DockerInfo contains information about the Docker server
+//
+// See https://goo.gl/bHUoz9 for more details.
+type DockerInfo struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ OomKillDisable bool
+ ExperimentalBuild bool
+ NFd int
+ NGoroutines int
+ SystemTime string
+ ExecutionDriver string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *ServiceConfig
+ SecurityOptions []string
+ NCPU int
+ MemTotal int64
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+ Isolation string
+ InitBinary string
+ DefaultRuntime string
+ LiveRestoreEnabled bool
+ Swarm swarm.Info
+}
+
+// PluginsInfo is a struct with the plugins registered with the docker daemon
+//
+// for more information, see: https://goo.gl/bHUoz9
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+}
+
+// ServiceConfig stores daemon registry services configuration.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type ServiceConfig struct {
+ InsecureRegistryCIDRs []*NetIPNet
+ IndexConfigs map[string]*IndexInfo
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type NetIPNet net.IPNet
+
+// MarshalJSON returns the JSON representation of the IPNet.
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON.
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
+
+// IndexInfo contains information about a registry.
+//
+// for more information, see: https://goo.gl/7iFFDz
+type IndexInfo struct {
+ Name string
+ Mirrors []string
+ Secure bool
+ Official bool
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See https://goo.gl/ElTHi2 for more details.
+func (c *Client) Info() (*DockerInfo, error) {
+ resp, err := c.do("GET", "/info", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var info DockerInfo
+ if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+// ParseRepositoryTag splits a repository reference into two parts: the
+// repository and the tag. It ignores any digest that is present.
+//
+// Some examples:
+//
+// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
+// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest
+func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+ parts := strings.SplitN(repoTag, "@", 2)
+ repoTag = parts[0]
+ n := strings.LastIndex(repoTag, ":")
+ if n < 0 {
+ return repoTag, ""
+ }
+ if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
+ return repoTag[:n], tag
+ }
+ return repoTag, ""
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go
new file mode 100644
index 000000000..c6ddb22c6
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/network.go
@@ -0,0 +1,321 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+)
+
+// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the
+// network already exists.
+var ErrNetworkAlreadyExists = errors.New("network already exists")
+
+// Network represents a network.
+//
+// See https://goo.gl/6GugX3 for more details.
+type Network struct {
+ Name string
+ ID string `json:"Id"`
+ Scope string
+ Driver string
+ IPAM IPAMOptions
+ Containers map[string]Endpoint
+ Options map[string]string
+ Internal bool
+ EnableIPv6 bool `json:"EnableIPv6"`
+ Labels map[string]string
+}
+
+// Endpoint contains network resources allocated and used for a container in a network
+//
+// See https://goo.gl/6GugX3 for more details.
+type Endpoint struct {
+ Name string
+ ID string `json:"EndpointID"`
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// ListNetworks returns all networks.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) ListNetworks() ([]Network, error) {
+ resp, err := c.do("GET", "/networks", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var networks []Network
+ if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
+
+// NetworkFilterOpts is an aggregation of key=value that Docker
+// uses to filter networks
+type NetworkFilterOpts map[string]map[string]bool
+
+// FilteredListNetworks returns all networks with the filters applied
+//
+// See goo.gl/zd2mx4 for more details.
+func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) {
+ params, err := json.Marshal(opts)
+ if err != nil {
+ return nil, err
+ }
+ path := "/networks?filters=" + string(params)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var networks []Network
+ if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
+
+// NetworkInfo returns information about a network by its ID.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) NetworkInfo(id string) (*Network, error) {
+ path := "/networks/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchNetwork{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var network Network
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, err
+ }
+ return &network, nil
+}
+
+// CreateNetworkOptions specify parameters to the CreateNetwork function and,
+// for now, serve as the expected body of the "create network" HTTP request
+// message.
+// See https://goo.gl/6GugX3 for more details.
+type CreateNetworkOptions struct {
+ Name string `json:"Name" yaml:"Name" toml:"Name"`
+ Driver string `json:"Driver" yaml:"Driver" toml:"Driver"`
+ IPAM *IPAMOptions `json:"IPAM,omitempty" yaml:"IPAM" toml:"IPAM"`
+ Options map[string]interface{} `json:"Options" yaml:"Options" toml:"Options"`
+ Labels map[string]string `json:"Labels" yaml:"Labels" toml:"Labels"`
+ CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate" toml:"CheckDuplicate"`
+ Internal bool `json:"Internal" yaml:"Internal" toml:"Internal"`
+ EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6" toml:"EnableIPv6"`
+ Context context.Context `json:"-"`
+}
+
+// IPAMOptions controls IP Address Management when creating a network
+//
+// See https://goo.gl/T8kRVH for more details.
+type IPAMOptions struct {
+ Driver string `json:"Driver" yaml:"Driver" toml:"Driver"`
+ Config []IPAMConfig `json:"Config" yaml:"Config" toml:"Config"`
+ Options map[string]string `json:"Options" yaml:"Options" toml:"Options"`
+}
+
+// IPAMConfig represents IPAM configurations
+//
+// See https://goo.gl/T8kRVH for more details.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// CreateNetwork creates a new network, returning the network instance,
+// or an error in case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
+ resp, err := c.do(
+ "POST",
+ "/networks/create",
+ doOptions{
+ data: opts,
+ context: opts.Context,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ type createNetworkResponse struct {
+ ID string
+ }
+ var (
+ network Network
+ cnr createNetworkResponse
+ )
+ if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil {
+ return nil, err
+ }
+
+ network.Name = opts.Name
+ network.ID = cnr.ID
+ network.Driver = opts.Driver
+
+ return &network, nil
+}
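+
+// Illustrative usage (a sketch; the network name is a placeholder):
+//
+//	network, err := c.CreateNetwork(CreateNetworkOptions{
+//		Name:           "mynet",
+//		Driver:         "bridge",
+//		CheckDuplicate: true,
+//	})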
+
+// RemoveNetwork removes a network or returns an error in case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) RemoveNetwork(id string) error {
+ resp, err := c.do("DELETE", "/networks/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetwork{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NetworkConnectionOptions specify parameters to the ConnectNetwork and
+// DisconnectNetwork function.
+//
+// See https://goo.gl/RV7BJU for more details.
+type NetworkConnectionOptions struct {
+ Container string
+
+ // EndpointConfig is only applicable to the ConnectNetwork call
+ EndpointConfig *EndpointConfig `json:"EndpointConfig,omitempty"`
+
+ // Force is only applicable to the DisconnectNetwork call
+ Force bool
+
+ Context context.Context `json:"-"`
+}
+
+// EndpointConfig stores network endpoint details
+//
+// See https://goo.gl/RV7BJU for more details.
+type EndpointConfig struct {
+ IPAMConfig *EndpointIPAMConfig `json:"IPAMConfig,omitempty" yaml:"IPAMConfig,omitempty" toml:"IPAMConfig,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty" toml:"Links,omitempty"`
+ Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty" toml:"Aliases,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty" toml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty" toml:"EndpointID,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty" toml:"Gateway,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty" toml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty" toml:"IPPrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty" toml:"IPv6Gateway,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty" toml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty" toml:"GlobalIPv6PrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty" toml:"MacAddress,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for an
+// endpoint
+//
+// See https://goo.gl/RV7BJU for more details.
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+}
+
+// ConnectNetwork adds a container to a network or returns an error in case of
+// failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error {
+ resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{
+ data: opts,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
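+
+// Illustrative usage (sketch; the container name is a placeholder):
+//
+//	err := c.ConnectNetwork(network.ID, NetworkConnectionOptions{
+//		Container: "mycontainer",
+//	})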
+
+// DisconnectNetwork removes a container from a network or returns an error in
+// case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error {
+ resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PruneNetworksOptions specify parameters to the PruneNetworks function.
+//
+// See https://goo.gl/kX0S9h for more details.
+type PruneNetworksOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// PruneNetworksResults specify results from the PruneNetworks function.
+//
+// See https://goo.gl/kX0S9h for more details.
+type PruneNetworksResults struct {
+ NetworksDeleted []string
+}
+
+// PruneNetworks deletes networks which are unused.
+//
+// See https://goo.gl/kX0S9h for more details.
+func (c *Client) PruneNetworks(opts PruneNetworksOptions) (*PruneNetworksResults, error) {
+ path := "/networks/prune?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var results PruneNetworksResults
+ if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+ return nil, err
+ }
+ return &results, nil
+}
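+
+// Example (illustrative sketch): pruning unused networks, assuming a
+// configured *Client named "client"; the "until" filter is a hypothetical
+// choice and must be supported by the daemon:
+//
+//    results, err := client.PruneNetworks(PruneNetworksOptions{
+//        Filters: map[string][]string{"until": {"1h"}},
+//    })
+//    if err == nil {
+//        fmt.Println("deleted:", results.NetworksDeleted)
+//    }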
+
+// NoSuchNetwork is the error returned when a given network does not exist.
+type NoSuchNetwork struct {
+ ID string
+}
+
+func (err *NoSuchNetwork) Error() string {
+ return fmt.Sprintf("No such network: %s", err.ID)
+}
+
+// NoSuchNetworkOrContainer is the error returned when a given network or
+// container does not exist.
+type NoSuchNetworkOrContainer struct {
+ NetworkID string
+ ContainerID string
+}
+
+func (err *NoSuchNetworkOrContainer) Error() string {
+ return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go
new file mode 100644
index 000000000..a28ff3d1e
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go
@@ -0,0 +1,418 @@
+// Copyright 2018 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+)
+
+// PluginPrivilege represents a privilege for a plugin.
+type PluginPrivilege struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"`
+ Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// InstallPluginOptions specify parameters to the InstallPlugins function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type InstallPluginOptions struct {
+ Remote string
+ Name string
+ Plugins []PluginPrivilege `qs:"-"`
+
+ Auth AuthConfiguration
+
+ Context context.Context
+}
+
+// InstallPlugins installs a plugin or returns an error in case of failure.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) InstallPlugins(opts InstallPluginOptions) error {
+ path := "/plugins/pull?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Plugins,
+ context: opts.Context,
+ })
+ if err != nil {
+ // resp is nil when err is non-nil, so it must not be touched here.
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
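+
+// Example (illustrative sketch): installing a plugin from a registry,
+// assuming a configured *Client named "client"; the plugin reference is
+// hypothetical:
+//
+//    err := client.InstallPlugins(InstallPluginOptions{
+//        Remote: "vieux/sshfs:latest",
+//    })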
+
+// PluginSettings stores plugin settings.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginSettings struct {
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"`
+ Devices []string `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"`
+}
+
+// PluginInterface stores plugin interface.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginInterface struct {
+ Types []string `json:"Types,omitempty" yaml:"Types,omitempty" toml:"Types,omitempty"`
+ Socket string `json:"Socket,omitempty" yaml:"Socket,omitempty" toml:"Socket,omitempty"`
+}
+
+// PluginNetwork stores plugin network type.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginNetwork struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty" toml:"Type,omitempty"`
+}
+
+// PluginLinux stores plugin linux setting.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginLinux struct {
+ Capabilities []string `json:"Capabilities,omitempty" yaml:"Capabilities,omitempty" toml:"Capabilities,omitempty"`
+ AllowAllDevices bool `json:"AllowAllDevices,omitempty" yaml:"AllowAllDevices,omitempty" toml:"AllowAllDevices,omitempty"`
+ Devices []PluginLinuxDevices `json:"Devices,omitempty" yaml:"Devices,omitempty" toml:"Devices,omitempty"`
+}
+
+// PluginLinuxDevices stores plugin linux device setting.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginLinuxDevices struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Description string `json:"Documentation,omitempty" yaml:"Documentation,omitempty" toml:"Documentation,omitempty"`
+ Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"`
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty" toml:"Path,omitempty"`
+}
+
+// PluginEnv stores plugin environment.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginEnv struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"`
+ Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// PluginArgs stores plugin arguments.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginArgs struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"`
+ Settable []string `json:"Settable,omitempty" yaml:"Settable,omitempty" toml:"Settable,omitempty"`
+ Value []string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
+}
+
+// PluginUser stores plugin user.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginUser struct {
+ UID int32 `json:"UID,omitempty" yaml:"UID,omitempty" toml:"UID,omitempty"`
+ GID int32 `json:"GID,omitempty" yaml:"GID,omitempty" toml:"GID,omitempty"`
+}
+
+// PluginConfig stores plugin config.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginConfig struct {
+ Description string `json:"Description,omitempty" yaml:"Description,omitempty" toml:"Description,omitempty"`
+ Documentation string `json:"Documentation,omitempty" yaml:"Documentation,omitempty" toml:"Documentation,omitempty"`
+ Interface PluginInterface `json:"Interface,omitempty" yaml:"Interface,omitempty" toml:"Interface,omitempty"`
+ Entrypoint []string `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty" toml:"Entrypoint,omitempty"`
+ WorkDir string `json:"WorkDir,omitempty" yaml:"WorkDir,omitempty" toml:"WorkDir,omitempty"`
+ User PluginUser `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"`
+ Network PluginNetwork `json:"Network,omitempty" yaml:"Network,omitempty" toml:"Network,omitempty"`
+ Linux PluginLinux `json:"Linux,omitempty" yaml:"Linux,omitempty" toml:"Linux,omitempty"`
+ PropagatedMount string `json:"PropagatedMount,omitempty" yaml:"PropagatedMount,omitempty" toml:"PropagatedMount,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty" toml:"Mounts,omitempty"`
+ Env []PluginEnv `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"`
+ Args PluginArgs `json:"Args,omitempty" yaml:"Args,omitempty" toml:"Args,omitempty"`
+}
+
+// PluginDetail specifies results from the ListPlugins function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PluginDetail struct {
+ ID string `json:"Id,omitempty" yaml:"Id,omitempty" toml:"Id,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ Tag string `json:"Tag,omitempty" yaml:"Tag,omitempty" toml:"Tag,omitempty"`
+ Active bool `json:"Active,omitempty" yaml:"Active,omitempty" toml:"Active,omitempty"`
+ Settings PluginSettings `json:"Settings,omitempty" yaml:"Settings,omitempty" toml:"Settings,omitempty"`
+ Config PluginConfig `json:"Config,omitempty" yaml:"Config,omitempty" toml:"Config,omitempty"`
+}
+
+// ListPlugins returns details for the installed plugins, or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) ListPlugins(ctx context.Context) ([]PluginDetail, error) {
+ resp, err := c.do("GET", "/plugins", doOptions{
+ context: ctx,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ pluginDetails := make([]PluginDetail, 0)
+ if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil {
+ return nil, err
+ }
+ return pluginDetails, nil
+}
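+
+// Example (illustrative sketch): listing installed plugins, assuming a
+// configured *Client named "client":
+//
+//    plugins, err := client.ListPlugins(context.Background())
+//    if err == nil {
+//        for _, p := range plugins {
+//            fmt.Println(p.Name, p.Active)
+//        }
+//    }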
+
+// ListFilteredPluginsOptions specify parameters to the ListFilteredPlugins function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type ListFilteredPluginsOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListFilteredPlugins returns details for plugins matching the given filters, or an error.
+//
+// See https://goo.gl/rmdmWg for more details.
+func (c *Client) ListFilteredPlugins(opts ListFilteredPluginsOptions) ([]PluginDetail, error) {
+ path := "/plugins/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ pluginDetails := make([]PluginDetail, 0)
+ if err := json.NewDecoder(resp.Body).Decode(&pluginDetails); err != nil {
+ return nil, err
+ }
+ return pluginDetails, nil
+}
+
+// GetPluginPrivileges returns the privileges required by a plugin, or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) GetPluginPrivileges(name string, ctx context.Context) ([]PluginPrivilege, error) {
+ resp, err := c.do("GET", "/plugins/privileges?remote="+name, doOptions{
+ context: ctx,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var pluginPrivileges []PluginPrivilege
+ if err := json.NewDecoder(resp.Body).Decode(&pluginPrivileges); err != nil {
+ return nil, err
+ }
+ return pluginPrivileges, nil
+}
+
+// InspectPlugins returns details about a single plugin, or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) InspectPlugins(name string, ctx context.Context) (*PluginDetail, error) {
+ resp, err := c.do("GET", "/plugins/"+name+"/json", doOptions{
+ context: ctx,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchPlugin{ID: name}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var pluginDetail PluginDetail
+ if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil {
+ return nil, err
+ }
+ return &pluginDetail, nil
+}
+
+// RemovePluginOptions specify parameters to the RemovePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type RemovePluginOptions struct {
+ // The Name of the plugin.
+ Name string `qs:"-"`
+
+ Force bool `qs:"force"`
+ Context context.Context
+}
+
+// RemovePlugin removes a plugin and returns its details, or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) {
+ path := "/plugins/" + opts.Name + "?" + queryString(opts)
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchPlugin{ID: opts.Name}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var pluginDetail PluginDetail
+ if err := json.NewDecoder(resp.Body).Decode(&pluginDetail); err != nil {
+ return nil, err
+ }
+ return &pluginDetail, nil
+}
+
+// EnablePluginOptions specify parameters to the EnablePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type EnablePluginOptions struct {
+ // The Name of the plugin.
+ Name string `qs:"-"`
+ Timeout int64 `qs:"timeout"`
+
+ Context context.Context
+}
+
+// EnablePlugin enables the plugin referenced by opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) EnablePlugin(opts EnablePluginOptions) error {
+ path := "/plugins/" + opts.Name + "/enable?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// DisablePluginOptions specify parameters to the DisablePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type DisablePluginOptions struct {
+ // The Name of the plugin.
+ Name string `qs:"-"`
+
+ Context context.Context
+}
+
+// DisablePlugin disables the plugin referenced by opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) DisablePlugin(opts DisablePluginOptions) error {
+ path := "/plugins/" + opts.Name + "/disable"
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CreatePluginOptions specify parameters to the CreatePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type CreatePluginOptions struct {
+ // The Name of the plugin.
+ Name string `qs:"name"`
+ // Path to tar containing plugin
+ Path string `qs:"-"`
+
+ Context context.Context
+}
+
+// CreatePlugin creates the plugin referenced by opts and returns the
+// daemon's response body as a string, or an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) {
+ path := "/plugins/create?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Path,
+ context: opts.Context})
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(body), nil
+}
+
+// PushPluginOptions specify parameters to PushPlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type PushPluginOptions struct {
+ // The Name of the plugin.
+ Name string
+
+ Context context.Context
+}
+
+// PushPlugin pushes the plugin referenced by opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) PushPlugin(opts PushPluginOptions) error {
+ path := "/plugins/" + opts.Name + "/push"
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ConfigurePluginOptions specify parameters to the ConfigurePlugin function.
+//
+// See https://goo.gl/C4t7Tz for more details.
+type ConfigurePluginOptions struct {
+ // The Name of the plugin.
+ Name string `qs:"name"`
+ Envs []string
+
+ Context context.Context
+}
+
+// ConfigurePlugin configures the plugin referenced by opts, or returns an error.
+//
+// See https://goo.gl/C4t7Tz for more details.
+func (c *Client) ConfigurePlugin(opts ConfigurePluginOptions) error {
+ path := "/plugins/" + opts.Name + "/set"
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Envs,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchPlugin{ID: opts.Name}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NoSuchPlugin is the error returned when a given plugin does not exist.
+type NoSuchPlugin struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchPlugin) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such plugin: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/fsouza/go-dockerclient/signal.go
new file mode 100644
index 000000000..16aa00388
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/signal.go
@@ -0,0 +1,49 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+// Signal represents a signal that can be sent to the container with the
+// KillContainer call.
+type Signal int
+
+// These values represent all signals available on Linux, where containers will
+// be running.
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go
new file mode 100644
index 000000000..a257758fc
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go
@@ -0,0 +1,156 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+var (
+ // ErrNodeAlreadyInSwarm is the error returned by InitSwarm and JoinSwarm
+ // when the node is already part of a Swarm.
+ ErrNodeAlreadyInSwarm = errors.New("node already in a Swarm")
+
+ // ErrNodeNotInSwarm is the error returned by LeaveSwarm and UpdateSwarm
+ // when the node is not part of a Swarm.
+ ErrNodeNotInSwarm = errors.New("node is not in a Swarm")
+)
+
+// InitSwarmOptions specify parameters to the InitSwarm function.
+// See https://goo.gl/hzkgWu for more details.
+type InitSwarmOptions struct {
+ swarm.InitRequest
+ Context context.Context
+}
+
+// InitSwarm initializes a new Swarm and returns the node ID.
+// See https://goo.gl/ZWyG1M for more details.
+func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) {
+ path := "/swarm/init"
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.InitRequest,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) {
+ return "", ErrNodeAlreadyInSwarm
+ }
+ return "", err
+ }
+ defer resp.Body.Close()
+ var response string
+ if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
+ return "", err
+ }
+ return response, nil
+}
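+
+// Example (illustrative sketch): initializing a single-node Swarm, assuming
+// a configured *Client named "client"; the listen address is a hypothetical
+// choice:
+//
+//    nodeID, err := client.InitSwarm(InitSwarmOptions{
+//        InitRequest: swarm.InitRequest{ListenAddr: "0.0.0.0:2377"},
+//    })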
+
+// JoinSwarmOptions specify parameters to the JoinSwarm function.
+// See https://goo.gl/TdhJWU for more details.
+type JoinSwarmOptions struct {
+ swarm.JoinRequest
+ Context context.Context
+}
+
+// JoinSwarm joins an existing Swarm.
+// See https://goo.gl/N59IP1 for more details.
+func (c *Client) JoinSwarm(opts JoinSwarmOptions) error {
+ path := "/swarm/join"
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.JoinRequest,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) {
+ return ErrNodeAlreadyInSwarm
+ }
+ // resp is nil when err is non-nil, so return before touching it.
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// LeaveSwarmOptions specify parameters to the LeaveSwarm function.
+// See https://goo.gl/UWDlLg for more details.
+type LeaveSwarmOptions struct {
+ Force bool
+ Context context.Context
+}
+
+// LeaveSwarm leaves a Swarm.
+// See https://goo.gl/FTX1aD for more details.
+func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error {
+ params := make(url.Values)
+ params.Set("force", strconv.FormatBool(opts.Force))
+ path := "/swarm/leave?" + params.Encode()
+ resp, err := c.do("POST", path, doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) {
+ return ErrNodeNotInSwarm
+ }
+ // resp is nil when err is non-nil, so return before touching it.
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UpdateSwarmOptions specify parameters to the UpdateSwarm function.
+// See https://goo.gl/vFbq36 for more details.
+type UpdateSwarmOptions struct {
+ Version int
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ Swarm swarm.Spec
+ Context context.Context
+}
+
+// UpdateSwarm updates a Swarm.
+// See https://goo.gl/iJFnsw for more details.
+func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error {
+ params := make(url.Values)
+ params.Set("version", strconv.Itoa(opts.Version))
+ params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken))
+ params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken))
+ path := "/swarm/update?" + params.Encode()
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Swarm,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) {
+ return ErrNodeNotInSwarm
+ }
+ // resp is nil when err is non-nil, so return before touching it.
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectSwarm inspects a Swarm.
+// See https://goo.gl/MFwgX9 for more details.
+func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) {
+ response := swarm.Swarm{}
+ resp, err := c.do("GET", "/swarm", doOptions{
+ context: ctx,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && (e.Status == http.StatusNotAcceptable || e.Status == http.StatusServiceUnavailable) {
+ return response, ErrNodeNotInSwarm
+ }
+ return response, err
+ }
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
new file mode 100644
index 000000000..fb73ab2ef
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_configs.go
@@ -0,0 +1,171 @@
+// Copyright 2017 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NoSuchConfig is the error returned when a given config does not exist.
+type NoSuchConfig struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchConfig) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such config: " + err.ID
+}
+
+// CreateConfigOptions specify parameters to the CreateConfig function.
+//
+// See https://goo.gl/KrVjHz for more details.
+type CreateConfigOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.ConfigSpec
+ Context context.Context
+}
+
+// CreateConfig creates a new config, returning the config instance
+// or an error in case of failure.
+//
+// See https://goo.gl/KrVjHz for more details.
+func (c *Client) CreateConfig(opts CreateConfigOptions) (*swarm.Config, error) {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return nil, err
+ }
+ path := "/configs/create?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ headers: headers,
+ data: opts.ConfigSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var config swarm.Config
+ if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
+ return nil, err
+ }
+ return &config, nil
+}
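+
+// Example (illustrative sketch): creating a config from raw bytes, assuming
+// a configured *Client named "client"; the name and payload are hypothetical:
+//
+//    cfg, err := client.CreateConfig(CreateConfigOptions{
+//        ConfigSpec: swarm.ConfigSpec{
+//            Annotations: swarm.Annotations{Name: "app-config"},
+//            Data:        []byte("key=value"),
+//        },
+//    })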
+
+// RemoveConfigOptions encapsulates options to remove a config.
+//
+// See https://goo.gl/Tqrtya for more details.
+type RemoveConfigOptions struct {
+ ID string `qs:"-"`
+ Context context.Context
+}
+
+// RemoveConfig removes a config, returning an error in case of failure.
+//
+// See https://goo.gl/Tqrtya for more details.
+func (c *Client) RemoveConfig(opts RemoveConfigOptions) error {
+ path := "/configs/" + opts.ID
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchConfig{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UpdateConfigOptions specify parameters to the UpdateConfig function.
+//
+// See https://goo.gl/wu3MmS for more details.
+type UpdateConfigOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.ConfigSpec
+ Context context.Context
+ Version uint64
+}
+
+// UpdateConfig updates the config identified by id with the given options.
+//
+// Only labels can be updated; see
+// https://docs.docker.com/engine/api/v1.33/#operation/ConfigUpdate
+// and https://goo.gl/wu3MmS for more details.
+func (c *Client) UpdateConfig(id string, opts UpdateConfigOptions) error {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return err
+ }
+ params := make(url.Values)
+ params.Set("version", strconv.FormatUint(opts.Version, 10))
+ resp, err := c.do("POST", "/configs/"+id+"/update?"+params.Encode(), doOptions{
+ headers: headers,
+ data: opts.ConfigSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchConfig{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// InspectConfig returns information about a config by its ID.
+//
+// See https://goo.gl/dHmr75 for more details.
+func (c *Client) InspectConfig(id string) (*swarm.Config, error) {
+ path := "/configs/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchConfig{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var config swarm.Config
+ if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
+ return nil, err
+ }
+ return &config, nil
+}
+
+// ListConfigsOptions specify parameters to the ListConfigs function.
+//
+// See https://goo.gl/DwvNMd for more details.
+type ListConfigsOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListConfigs returns a slice of configs matching the given criteria.
+//
+// See https://goo.gl/DwvNMd for more details.
+func (c *Client) ListConfigs(opts ListConfigsOptions) ([]swarm.Config, error) {
+ path := "/configs?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var configs []swarm.Config
+ if err := json.NewDecoder(resp.Body).Decode(&configs); err != nil {
+ return nil, err
+ }
+ return configs, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_node.go b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go
new file mode 100644
index 000000000..095653cd9
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_node.go
@@ -0,0 +1,130 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NoSuchNode is the error returned when a given node does not exist.
+type NoSuchNode struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchNode) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such node: " + err.ID
+}
+
+// ListNodesOptions specify parameters to the ListNodes function.
+//
+// See http://goo.gl/3K4GwU for more details.
+type ListNodesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListNodes returns a slice of nodes matching the given criteria.
+//
+// See http://goo.gl/3K4GwU for more details.
+func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) {
+ path := "/nodes?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var nodes []swarm.Node
+ if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil {
+ return nil, err
+ }
+ return nodes, nil
+}
+
+// InspectNode returns information about a node by its ID.
+//
+// See http://goo.gl/WjkTOk for more details.
+func (c *Client) InspectNode(id string) (*swarm.Node, error) {
+ resp, err := c.do("GET", "/nodes/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchNode{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var node swarm.Node
+ if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
+ return nil, err
+ }
+ return &node, nil
+}
+
+// UpdateNodeOptions specify parameters to the NodeUpdate function.
+//
+// See http://goo.gl/VPBFgA for more details.
+type UpdateNodeOptions struct {
+ swarm.NodeSpec
+ Version uint64
+ Context context.Context
+}
+
+// UpdateNode updates a node.
+//
+// See http://goo.gl/VPBFgA for more details.
+func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error {
+ params := make(url.Values)
+ params.Set("version", strconv.FormatUint(opts.Version, 10))
+ path := "/nodes/" + id + "/update?" + params.Encode()
+ resp, err := c.do("POST", path, doOptions{
+ context: opts.Context,
+ forceJSON: true,
+ data: opts.NodeSpec,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNode{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RemoveNodeOptions specify parameters to the RemoveNode function.
+//
+// See http://goo.gl/0SNvYg for more details.
+type RemoveNodeOptions struct {
+ ID string
+ Force bool
+ Context context.Context
+}
+
+// RemoveNode removes a node.
+//
+// See http://goo.gl/0SNvYg for more details.
+func (c *Client) RemoveNode(opts RemoveNodeOptions) error {
+ params := make(url.Values)
+ params.Set("force", strconv.FormatBool(opts.Force))
+ path := "/nodes/" + opts.ID + "?" + params.Encode()
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNode{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
new file mode 100644
index 000000000..5a3b82ca5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_secrets.go
@@ -0,0 +1,171 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NoSuchSecret is the error returned when a given secret does not exist.
+type NoSuchSecret struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchSecret) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such secret: " + err.ID
+}
+
+// CreateSecretOptions specify parameters to the CreateSecret function.
+//
+// See https://goo.gl/KrVjHz for more details.
+type CreateSecretOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.SecretSpec
+ Context context.Context
+}
+
+// CreateSecret creates a new secret, returning the secret instance
+// or an error in case of failure.
+//
+// See https://goo.gl/KrVjHz for more details.
+func (c *Client) CreateSecret(opts CreateSecretOptions) (*swarm.Secret, error) {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return nil, err
+ }
+ path := "/secrets/create?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ headers: headers,
+ data: opts.SecretSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var secret swarm.Secret
+ if err := json.NewDecoder(resp.Body).Decode(&secret); err != nil {
+ return nil, err
+ }
+ return &secret, nil
+}
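+
+// Example (illustrative sketch): creating a secret from raw bytes, assuming
+// a configured *Client named "client"; the name and payload are hypothetical:
+//
+//    secret, err := client.CreateSecret(CreateSecretOptions{
+//        SecretSpec: swarm.SecretSpec{
+//            Annotations: swarm.Annotations{Name: "db-password"},
+//            Data:        []byte("s3cret"),
+//        },
+//    })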
+
+// RemoveSecretOptions encapsulates options to remove a secret.
+//
+// See https://goo.gl/Tqrtya for more details.
+type RemoveSecretOptions struct {
+ ID string `qs:"-"`
+ Context context.Context
+}
+
+// RemoveSecret removes a secret, returning an error in case of failure.
+//
+// See https://goo.gl/Tqrtya for more details.
+func (c *Client) RemoveSecret(opts RemoveSecretOptions) error {
+ path := "/secrets/" + opts.ID
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchSecret{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UpdateSecretOptions specify parameters to the UpdateSecret function.
+//
+// Only labels can be updated.
+// See https://docs.docker.com/engine/api/v1.33/#operation/SecretUpdate
+// See https://goo.gl/wu3MmS for more details.
+type UpdateSecretOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.SecretSpec
+ Context context.Context
+ Version uint64
+}
+
+// UpdateSecret updates the secret identified by id with the given options.
+//
+// See https://goo.gl/wu3MmS for more details.
+func (c *Client) UpdateSecret(id string, opts UpdateSecretOptions) error {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return err
+ }
+ params := make(url.Values)
+ params.Set("version", strconv.FormatUint(opts.Version, 10))
+ resp, err := c.do("POST", "/secrets/"+id+"/update?"+params.Encode(), doOptions{
+ headers: headers,
+ data: opts.SecretSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchSecret{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// InspectSecret returns information about a secret by its ID.
+//
+// See https://goo.gl/dHmr75 for more details.
+func (c *Client) InspectSecret(id string) (*swarm.Secret, error) {
+ path := "/secrets/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchSecret{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var secret swarm.Secret
+ if err := json.NewDecoder(resp.Body).Decode(&secret); err != nil {
+ return nil, err
+ }
+ return &secret, nil
+}
+
+// ListSecretsOptions specify parameters to the ListSecrets function.
+//
+// See https://goo.gl/DwvNMd for more details.
+type ListSecretsOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListSecrets returns a slice of secrets matching the given criteria.
+//
+// See https://goo.gl/DwvNMd for more details.
+func (c *Client) ListSecrets(opts ListSecretsOptions) ([]swarm.Secret, error) {
+ path := "/secrets?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var secrets []swarm.Secret
+ if err := json.NewDecoder(resp.Body).Decode(&secrets); err != nil {
+ return nil, err
+ }
+ return secrets, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_service.go b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
new file mode 100644
index 000000000..ca7e23725
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_service.go
@@ -0,0 +1,213 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NoSuchService is the error returned when a given service does not exist.
+type NoSuchService struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchService) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such service: " + err.ID
+}
+
+// CreateServiceOptions specify parameters to the CreateService function.
+//
+// See https://goo.gl/KrVjHz for more details.
+type CreateServiceOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.ServiceSpec
+ Context context.Context
+}
+
+// CreateService creates a new service, returning the service instance
+// or an error in case of failure.
+//
+// See https://goo.gl/KrVjHz for more details.
+func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return nil, err
+ }
+ path := "/services/create?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ headers: headers,
+ data: opts.ServiceSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var service swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&service); err != nil {
+ return nil, err
+ }
+ return &service, nil
+}
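+
+// Example (illustrative sketch): creating a minimal service, assuming a
+// configured *Client named "client"; the image is hypothetical and the exact
+// shape of swarm.TaskSpec.ContainerSpec (pointer vs. value) depends on the
+// vendored docker/docker version:
+//
+//    service, err := client.CreateService(CreateServiceOptions{
+//        ServiceSpec: swarm.ServiceSpec{
+//            Annotations:  swarm.Annotations{Name: "web"},
+//            TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"}},
+//        },
+//    })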
+
+// RemoveServiceOptions encapsulates options to remove a service.
+//
+// See https://goo.gl/Tqrtya for more details.
+type RemoveServiceOptions struct {
+ ID string `qs:"-"`
+ Context context.Context
+}
+
+// RemoveService removes a service, returning an error in case of failure.
+//
+// See https://goo.gl/Tqrtya for more details.
+func (c *Client) RemoveService(opts RemoveServiceOptions) error {
+ path := "/services/" + opts.ID
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchService{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UpdateServiceOptions specify parameters to the UpdateService function.
+//
+// See https://goo.gl/wu3MmS for more details.
+type UpdateServiceOptions struct {
+ Auth AuthConfiguration `qs:"-"`
+ swarm.ServiceSpec `qs:"-"`
+ Context context.Context
+ Version uint64
+ Rollback string
+}
+
+// UpdateService updates the service identified by id with the given options.
+//
+// See https://goo.gl/wu3MmS for more details.
+func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {
+ headers, err := headersWithAuth(opts.Auth)
+ if err != nil {
+ return err
+ }
+ resp, err := c.do("POST", "/services/"+id+"/update?"+queryString(opts), doOptions{
+ headers: headers,
+ data: opts.ServiceSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchService{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// InspectService returns information about a service by its ID.
+//
+// See https://goo.gl/dHmr75 for more details.
+func (c *Client) InspectService(id string) (*swarm.Service, error) {
+ path := "/services/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchService{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var service swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&service); err != nil {
+ return nil, err
+ }
+ return &service, nil
+}
+
+// ListServicesOptions specify parameters to the ListServices function.
+//
+// See https://goo.gl/DwvNMd for more details.
+type ListServicesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListServices returns a slice of services matching the given criteria.
+//
+// See https://goo.gl/DwvNMd for more details.
+func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) {
+ path := "/services?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var services []swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&services); err != nil {
+ return nil, err
+ }
+ return services, nil
+}
+
+// LogsServiceOptions represents the set of options used when getting logs from a
+// service.
+type LogsServiceOptions struct {
+ Context context.Context
+ Service string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Tail string
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+ Since int64
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Timestamps bool
+ Details bool
+}
+
+// GetServiceLogs gets stdout and stderr logs from the specified service.
+//
+// When LogsServiceOptions.RawTerminal is set to false, go-dockerclient will multiplex
+// the streams and send the container's stdout to LogsServiceOptions.OutputStream, and
+// stderr to LogsServiceOptions.ErrorStream.
+//
+// When LogsServiceOptions.RawTerminal is true, callers will get the raw stream on
+// LogsServiceOptions.OutputStream.
+func (c *Client) GetServiceLogs(opts LogsServiceOptions) error {
+ if opts.Service == "" {
+ return &NoSuchService{ID: opts.Service}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/services/" + opts.Service + "/logs?" + queryString(opts)
+ return c.stream("GET", path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
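+
+// Example (illustrative sketch): streaming the logs of a service named "web"
+// to the process's stdout and stderr, assuming a configured *Client named
+// "client":
+//
+//    err := client.GetServiceLogs(LogsServiceOptions{
+//        Service:      "web",
+//        OutputStream: os.Stdout,
+//        ErrorStream:  os.Stderr,
+//        Stdout:       true,
+//        Stderr:       true,
+//    })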
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm_task.go b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go
new file mode 100644
index 000000000..3b1161ab9
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm_task.go
@@ -0,0 +1,70 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+)
+
+// NoSuchTask is the error returned when a given task does not exist.
+type NoSuchTask struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchTask) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such task: " + err.ID
+}
+
+// ListTasksOptions specify parameters to the ListTasks function.
+//
+// See http://goo.gl/rByLzw for more details.
+type ListTasksOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListTasks returns a slice of tasks matching the given criteria.
+//
+// See http://goo.gl/rByLzw for more details.
+func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) {
+ path := "/tasks?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var tasks []swarm.Task
+ if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil {
+ return nil, err
+ }
+ return tasks, nil
+}
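+
+// Example (illustrative sketch): listing the tasks of a service named "web",
+// assuming a configured *Client named "client":
+//
+//    tasks, err := client.ListTasks(ListTasksOptions{
+//        Filters: map[string][]string{"service": {"web"}},
+//    })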
+
+// InspectTask returns information about a task by its ID.
+//
+// See http://goo.gl/kyziuq for more details.
+func (c *Client) InspectTask(id string) (*swarm.Task, error) {
+ resp, err := c.do("GET", "/tasks/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchTask{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var task swarm.Task
+ if err := json.NewDecoder(resp.Body).Decode(&task); err != nil {
+ return nil, err
+ }
+ return &task, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go
new file mode 100644
index 000000000..9716a7712
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tar.go
@@ -0,0 +1,122 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/fileutils"
+)
+
+func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
+ srcPath, err := filepath.Abs(srcPath)
+ if err != nil {
+ return nil, err
+ }
+
+ excludes, err := parseDockerignore(srcPath)
+ if err != nil {
+ return nil, err
+ }
+
+ includes := []string{"."}
+
+ // If .dockerignore mentions .dockerignore or the Dockerfile
+ // then make sure we send both files over to the daemon
+ // because Dockerfile is, obviously, needed no matter what, and
+ // .dockerignore is needed to know if either one needs to be
+// removed. The daemon will remove them for us, if needed, after it
+ // parses the Dockerfile.
+ //
+ // https://github.com/docker/docker/issues/8330
+ //
+ forceIncludeFiles := []string{".dockerignore", dockerfilePath}
+
+ for _, includeFile := range forceIncludeFiles {
+ if includeFile == "" {
+ continue
+ }
+ keepThem, err := fileutils.Matches(includeFile, excludes)
+ if err != nil {
+ return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
+ }
+ if keepThem {
+ includes = append(includes, includeFile)
+ }
+ }
+
+ if err := validateContextDirectory(srcPath, excludes); err != nil {
+ return nil, err
+ }
+ tarOpts := &archive.TarOptions{
+ ExcludePatterns: excludes,
+ IncludeFiles: includes,
+ Compression: archive.Uncompressed,
+ NoLchown: true,
+ }
+ return archive.TarWithOptions(srcPath, tarOpts)
+}
+
+// validateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error
+func validateContextDirectory(srcPath string, excludes []string) error {
+ return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+ // skip this directory/file if it's not in the path, it won't get added to the context
+ if relFilePath, relErr := filepath.Rel(srcPath, filePath); relErr != nil {
+ return relErr
+ } else if skip, matchErr := fileutils.Matches(relFilePath, excludes); matchErr != nil {
+ return matchErr
+ } else if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if err != nil {
+ if os.IsPermission(err) {
+ return fmt.Errorf("can't stat '%s'", filePath)
+ }
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // skip checking if symlinks point to non-existing files, such symlinks can be useful
+ // also skip named pipes, because they hang on open
+ if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+ return nil
+ }
+
+ if !f.IsDir() {
+ currentFile, err := os.Open(filePath)
+ if err != nil && os.IsPermission(err) {
+ return fmt.Errorf("no permission to read from '%s'", filePath)
+ }
+ currentFile.Close()
+ }
+ return nil
+ })
+}
+
+func parseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ excludes = strings.Split(string(ignore), "\n")
+
+ return excludes, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
new file mode 100644
index 000000000..bb5790b5f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -0,0 +1,118 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The content is borrowed from Docker's own source code to provide a simple
+// tls based dialer
+
+package docker
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "time"
+)
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := time.Until(dialer.Deadline)
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("tls: handshake timed out")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ config = copyTLSConfig(config)
+ config.ServerName = hostname
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we differ from the standard crypto/tls package: we return
+ // a wrapper that holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+// copyTLSConfig exists to silence a go vet warning about copying a tls.Config by value.
+func copyTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Certificates: cfg.Certificates,
+ CipherSuites: cfg.CipherSuites,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ ClientSessionCache: cfg.ClientSessionCache,
+ CurvePreferences: cfg.CurvePreferences,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ MaxVersion: cfg.MaxVersion,
+ MinVersion: cfg.MinVersion,
+ NameToCertificate: cfg.NameToCertificate,
+ NextProtos: cfg.NextProtos,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ Rand: cfg.Rand,
+ RootCAs: cfg.RootCAs,
+ ServerName: cfg.ServerName,
+ SessionTicketKey: cfg.SessionTicketKey,
+ SessionTicketsDisabled: cfg.SessionTicketsDisabled,
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
new file mode 100644
index 000000000..021a262b7
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -0,0 +1,190 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "net/http"
+)
+
+var (
+ // ErrNoSuchVolume is the error returned when the volume does not exist.
+ ErrNoSuchVolume = errors.New("no such volume")
+
+ // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use.
+ ErrVolumeInUse = errors.New("volume in use and cannot be removed")
+)
+
+// Volume represents a volume.
+//
+// See https://goo.gl/3wgTsd for more details.
+type Volume struct {
+ Name string `json:"Name" yaml:"Name" toml:"Name"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty" toml:"Driver,omitempty"`
+ Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty" toml:"Mountpoint,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty" toml:"Labels,omitempty"`
+ Options map[string]string `json:"Options,omitempty" yaml:"Options,omitempty" toml:"Options,omitempty"`
+}
+
+// ListVolumesOptions specify parameters to the ListVolumes function.
+//
+// See https://goo.gl/3wgTsd for more details.
+type ListVolumesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListVolumes returns a list of available volumes in the server.
+//
+// See https://goo.gl/3wgTsd for more details.
+func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
+ resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ m := make(map[string]interface{})
+ if err = json.NewDecoder(resp.Body).Decode(&m); err != nil {
+ return nil, err
+ }
+ var volumes []Volume
+ volumesJSON, ok := m["Volumes"]
+ if !ok {
+ return volumes, nil
+ }
+ data, err := json.Marshal(volumesJSON)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(data, &volumes); err != nil {
+ return nil, err
+ }
+ return volumes, nil
+}
+
+// CreateVolumeOptions specify parameters to the CreateVolume function.
+//
+// See https://goo.gl/qEhmEC for more details.
+type CreateVolumeOptions struct {
+ Name string
+ Driver string
+ DriverOpts map[string]string
+ Context context.Context `json:"-"`
+ Labels map[string]string
+}
+
+// CreateVolume creates a volume on the server.
+//
+// See https://goo.gl/qEhmEC for more details.
+func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
+ resp, err := c.do("POST", "/volumes/create", doOptions{
+ data: opts,
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
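+
+// Example (illustrative sketch): creating a named volume with the local
+// driver, assuming a configured *Client named "client":
+//
+//    vol, err := client.CreateVolume(CreateVolumeOptions{
+//        Name:   "data",
+//        Driver: "local",
+//    })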
+
+// InspectVolume returns a volume by its name.
+//
+// See https://goo.gl/GMjsMc for more details.
+func (c *Client) InspectVolume(name string) (*Volume, error) {
+ resp, err := c.do("GET", "/volumes/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchVolume
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
+
+// RemoveVolume removes a volume by its name.
+//
+// Deprecated: Use RemoveVolumeWithOptions instead.
+func (c *Client) RemoveVolume(name string) error {
+ return c.RemoveVolumeWithOptions(RemoveVolumeOptions{Name: name})
+}
+
+// RemoveVolumeOptions specify parameters to the RemoveVolumeWithOptions
+// function.
+//
+// See https://goo.gl/nvd6qj for more details.
+type RemoveVolumeOptions struct {
+ Context context.Context
+ Name string `qs:"-"`
+ Force bool
+}
+
+// RemoveVolumeWithOptions removes a volume by its name and takes extra
+// parameters.
+//
+// See https://goo.gl/nvd6qj for more details.
+func (c *Client) RemoveVolumeWithOptions(opts RemoveVolumeOptions) error {
+ path := "/volumes/" + opts.Name
+ resp, err := c.do("DELETE", path+"?"+queryString(opts), doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return ErrNoSuchVolume
+ }
+ if e.Status == http.StatusConflict {
+ return ErrVolumeInUse
+ }
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// PruneVolumesOptions specify parameters to the PruneVolumes function.
+//
+// See https://goo.gl/f9XDem for more details.
+type PruneVolumesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// PruneVolumesResults specify results from the PruneVolumes function.
+//
+// See https://goo.gl/f9XDem for more details.
+type PruneVolumesResults struct {
+ VolumesDeleted []string
+ SpaceReclaimed int64
+}
+
+// PruneVolumes deletes volumes which are unused.
+//
+// See https://goo.gl/f9XDem for more details.
+func (c *Client) PruneVolumes(opts PruneVolumesOptions) (*PruneVolumesResults, error) {
+ path := "/volumes/prune?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var results PruneVolumesResults
+ if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+ return nil, err
+ }
+ return &results, nil
+}
diff --git a/vendor/github.com/openshift/imagebuilder/LICENSE b/vendor/github.com/openshift/imagebuilder/LICENSE
new file mode 100644
index 000000000..ea21aad9d
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+ Copyright 2016 The OpenShift Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md
new file mode 100644
index 000000000..2f9c110dd
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -0,0 +1,104 @@
+Docker / OCI Image Builder
+==========================
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/openshift/imagebuilder)](https://goreportcard.com/report/github.com/openshift/imagebuilder)
+[![GoDoc](https://godoc.org/github.com/openshift/imagebuilder?status.png)](https://godoc.org/github.com/openshift/imagebuilder)
+[![Travis](https://travis-ci.org/openshift/imagebuilder.svg?branch=master)](https://travis-ci.org/openshift/imagebuilder)
+[![Join the chat at freenode:openshift-dev](https://img.shields.io/badge/irc-freenode%3A%20%23openshift--dev-blue.svg)](http://webchat.freenode.net/?channels=%23openshift-dev)
+
+Note: this library is beta and may contain bugs that prevent images from being identical to those produced by Docker build. Test your images (and add to our conformance suite)!
+
+This library supports using the Dockerfile syntax to build Docker-compatible
+images without invoking Docker build. It is intended to give clients more
+control over how a Docker build is run, including:
+
+* Instead of building one layer per line, run all instructions in the
+ same container
+* Set Docker HostConfig settings like network and memory controls that
+ are not available when running Docker builds
+* Mount external files into the build that are not persisted as part of
+ the final image (i.e. "secrets")
+* If there are no RUN commands in the Dockerfile, the container is created
+ and committed, but never started.
+
+The final image should be 99.9% compatible with regular Docker builds,
+but bugs are always possible.
+
+Future goals include:
+
+* Output OCI compatible images
+* Support other container execution engines, like runc or rkt
+* Better conformance testing
+* Windows support
+
+## Install and Run
+
+To download and install the library and the binary, set up a Go build environment and, with `GOPATH` set, run:
+
+```
+$ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+```
+
+The included command-line tool takes one argument, a path to a directory containing a Dockerfile. The `-t` option
+can be used to specify the tag to apply to the built image:
+
+```
+$ imagebuilder [-t TAG] DIRECTORY
+```
+
+To mount a file into the build that will not be present in the final output image, run:
+
+```
+$ imagebuilder --mount ~/secrets/private.key:/etc/keys/private.key path/to/my/code testimage
+```
+
+Any process run by the Dockerfile will have access to `/etc/keys/private.key`, but that file will not be part of the committed image.
+
+Running `--mount` requires Docker 1.10 or newer, as it uses a Docker volume to hold the mounted files and the volume API was not
+available in earlier versions.
+
+You can also customize which Dockerfile is run, or run multiple Dockerfiles in sequence:
+
+```
+$ imagebuilder -f Dockerfile:Dockerfile.extra .
+```
+
+will build the current directory, combining the first Dockerfile with the second. The FROM in the
+second file is ignored.
+
+
+## Code Example
+
+```
+f, err := os.Open("path/to/Dockerfile")
+if err != nil {
+ return err
+}
+defer f.Close()
+
+e := builder.NewClientExecutor(o.Client)
+e.Out, e.ErrOut = os.Stdout, os.Stderr
+e.AllowPull = true
+e.Directory = "context/directory"
+e.Tag = "name/of-image:and-tag"
+e.AuthFn = nil // ... pass a function to retrieve authorization info
+e.LogFn = func(format string, args ...interface{}) {
+ fmt.Fprintf(e.ErrOut, "--> %s\n", fmt.Sprintf(format, args...))
+}
+
+buildErr := e.Build(f, map[string]string{"arg1": "value1"})
+if err := e.Cleanup(); err != nil {
+ fmt.Fprintf(e.ErrOut, "error: Unable to clean up build: %v\n", err)
+}
+
+return buildErr
+```
+
+For an example of usage, see OpenShift's experimental `dockerbuild` [command with mount secrets](https://github.com/openshift/origin/blob/26c9e032ff42f613fe10649cd7c5fa1b4c33501b/pkg/cmd/cli/cmd/dockerbuild/dockerbuild.go).
+
+## Run conformance tests (very slow)
+
+```
+go test ./dockerclient/conformance_test.go -tags conformance
+```
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
new file mode 100644
index 000000000..6d6e4c38a
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -0,0 +1,510 @@
+package imagebuilder
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/builder/dockerfile/parser"
+)
+
+// Copy defines a copy operation required on the container.
+type Copy struct {
+ // If true, this is a copy from the file system to the container. If false,
+ // the copy is from the context.
+ FromFS bool
+ // If set, this is a copy from the named stage or image to the container.
+ From string
+ Src []string
+ Dest string
+ Download bool
+}
+
+// Run defines a run operation required in the container.
+type Run struct {
+ Shell bool
+ Args []string
+}
+
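+// Executor abstracts the container-side operations a build requires:
+// preserving paths that are declared as volumes, copying content into the
+// container, running commands, and reacting to instructions this package
+// does not recognize.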
+type Executor interface {
+ Preserve(path string) error
+ Copy(excludes []string, copies ...Copy) error
+ Run(run Run, config docker.Config) error
+ UnrecognizedInstruction(step *Step) error
+}
+
+type logExecutor struct{}
+
+func (logExecutor) Preserve(path string) error {
+ log.Printf("PRESERVE %s", path)
+ return nil
+}
+
+func (logExecutor) Copy(excludes []string, copies ...Copy) error {
+ for _, c := range copies {
+ log.Printf("COPY %v -> %s (from:%s download:%t)", c.Src, c.Dest, c.From, c.Download)
+ }
+ return nil
+}
+
+func (logExecutor) Run(run Run, config docker.Config) error {
+ log.Printf("RUN %v %t (%v)", run.Args, run.Shell, config.Env)
+ return nil
+}
+
+func (logExecutor) UnrecognizedInstruction(step *Step) error {
+ log.Printf("Unknown instruction: %s", strings.ToUpper(step.Command))
+ return nil
+}
+
+type noopExecutor struct{}
+
+func (noopExecutor) Preserve(path string) error {
+ return nil
+}
+
+func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
+ return nil
+}
+
+func (noopExecutor) Run(run Run, config docker.Config) error {
+ return nil
+}
+
+func (noopExecutor) UnrecognizedInstruction(step *Step) error {
+ return nil
+}
+
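+// VolumeSet tracks the paths that have been declared as volumes. Entries
+// are stored without trailing slashes, and a parent path subsumes its
+// children. A sketch of the expected behavior (illustrative):
+//
+//	var s VolumeSet
+//	s.Add("/var/lib")       // true: newly covered
+//	s.Add("/var/lib/db")    // false: already covered by /var/lib
+//	s.Covers("/var/lib/db") // true: prefix match
+//	s.Has("/var/lib/db")    // false: Has matches exact entries only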
+type VolumeSet []string
+
+func (s *VolumeSet) Add(path string) bool {
+ if path == "/" {
+ set := len(*s) != 1 || (*s)[0] != ""
+ *s = []string{""}
+ return set
+ }
+ path = strings.TrimSuffix(path, "/")
+ var adjusted []string
+ for _, p := range *s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return false
+ }
+ if strings.HasPrefix(p, path+"/") {
+ continue
+ }
+ adjusted = append(adjusted, p)
+ }
+ adjusted = append(adjusted, path)
+ *s = adjusted
+ return true
+}
+
+func (s VolumeSet) Has(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path {
+ return true
+ }
+ }
+ return false
+}
+
+func (s VolumeSet) Covers(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ LogExecutor = logExecutor{}
+ NoopExecutor = noopExecutor{}
+)
+
+type Stages []Stage
+
+func (stages Stages) ByName(name string) (Stage, bool) {
+ for _, stage := range stages {
+ if stage.Name == name {
+ return stage, true
+ }
+ }
+ return Stage{}, false
+}
+
+func (stages Stages) ByTarget(target string) (Stages, bool) {
+ if len(target) == 0 {
+ return stages, true
+ }
+ for i, stage := range stages {
+ if stage.Name == target {
+ return stages[i : i+1], true
+ }
+ }
+ return nil, false
+}
+
+type Stage struct {
+ Position int
+ Name string
+ Builder *Builder
+ Node *parser.Node
+}
+
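+// NewStages splits a parsed Dockerfile into stages, one per FROM statement.
+// A stage is named by its "FROM ... AS name" clause when present, and by its
+// positional index otherwise; each stage receives a fresh Builder that
+// inherits Args and AllowedArgs from b. For example (illustrative):
+//
+//	FROM golang:1.10 AS build   // stage "build", position 0
+//	FROM alpine:3.7             // stage "1", position 1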
+func NewStages(node *parser.Node, b *Builder) Stages {
+ var stages Stages
+ for i, root := range SplitBy(node, command.From) {
+ name, _ := extractNameFromNode(root.Children[0])
+ if len(name) == 0 {
+ name = strconv.Itoa(i)
+ }
+ stages = append(stages, Stage{
+ Position: i,
+ Name: name,
+ Builder: &Builder{
+ Args: b.Args,
+ AllowedArgs: b.AllowedArgs,
+ },
+ Node: root,
+ })
+ }
+ return stages
+}
+
+func extractNameFromNode(node *parser.Node) (string, bool) {
+ if node.Value != command.From {
+ return "", false
+ }
+ n := node.Next
+ if n == nil || n.Next == nil {
+ return "", false
+ }
+ n = n.Next
+ if !strings.EqualFold(n.Value, "as") || n.Next == nil || len(n.Next.Value) == 0 {
+ return "", false
+ }
+ return n.Next.Value, true
+}
+
+type Builder struct {
+ RunConfig docker.Config
+
+ Env []string
+ Args map[string]string
+ CmdSet bool
+ Author string
+
+ AllowedArgs map[string]bool
+ Volumes VolumeSet
+ Excludes []string
+
+ PendingVolumes VolumeSet
+ PendingRuns []Run
+ PendingCopies []Copy
+
+ Warnings []string
+}
+
+func NewBuilder(args map[string]string) *Builder {
+ allowed := make(map[string]bool)
+ for k, v := range builtinAllowedBuildArgs {
+ allowed[k] = v
+ }
+ return &Builder{
+ Args: args,
+ AllowedArgs: allowed,
+ }
+}
+
+func ParseFile(path string) (*parser.Node, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseDockerfile(f)
+}
+
+// Step creates a new step from the current state.
+func (b *Builder) Step() *Step {
+ dst := make([]string, len(b.Env)+len(b.RunConfig.Env))
+ copy(dst, b.Env)
+ dst = append(dst, b.RunConfig.Env...)
+ dst = append(dst, b.Arguments()...)
+ return &Step{Env: dst}
+}
+
+// Run executes a step, transforming the current builder and
+// invoking any Copy or Run operations. noRunsRemaining is an
+// optimization hint that allows the builder to avoid performing
+// unnecessary work.
+func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
+ fn, ok := evaluateTable[step.Command]
+ if !ok {
+ return exec.UnrecognizedInstruction(step)
+ }
+ if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original); err != nil {
+ return err
+ }
+
+ copies := b.PendingCopies
+ b.PendingCopies = nil
+ runs := b.PendingRuns
+ b.PendingRuns = nil
+
+ // Once a VOLUME is defined, future ADD/COPY instructions are
+ // all that may mutate that path. Instruct the executor to preserve
+ // the path. The executor must handle invalidating preserved info.
+ for _, path := range b.PendingVolumes {
+ if b.Volumes.Add(path) && !noRunsRemaining {
+ if err := exec.Preserve(path); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := exec.Copy(b.Excludes, copies...); err != nil {
+ return err
+ }
+ for _, run := range runs {
+ config := b.Config()
+ config.Env = step.Env
+ if err := exec.Run(run, *config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// RequiresStart returns true if a running container environment is necessary
+// to invoke the provided commands.
+func (b *Builder) RequiresStart(node *parser.Node) bool {
+ for _, child := range node.Children {
+ if child.Value == command.Run {
+ return true
+ }
+ }
+ return false
+}
+
+// Config returns a snapshot of the current RunConfig intended for
+// use with a container commit.
+func (b *Builder) Config() *docker.Config {
+ config := b.RunConfig
+ if config.OnBuild == nil {
+ config.OnBuild = []string{}
+ }
+ if config.Entrypoint == nil {
+ config.Entrypoint = []string{}
+ }
+ config.Image = ""
+ return &config
+}
+
+// Arguments returns the currently active arguments.
+func (b *Builder) Arguments() []string {
+ var envs []string
+ for key, val := range b.Args {
+ if _, ok := b.AllowedArgs[key]; ok {
+ envs = append(envs, fmt.Sprintf("%s=%s", key, val))
+ }
+ }
+ return envs
+}
+
+// ErrNoFROM is returned if the Dockerfile did not contain a FROM
+// statement.
+var ErrNoFROM = fmt.Errorf("no FROM statement found")
+
+// From returns the image this Dockerfile depends on, or an error
+// if no FROM is found or if multiple FROMs are specified. If a
+// single FROM is found, the passed node is updated with only
+// the remaining statements. The builder's RunConfig.Image field
+// is set to the first From found, or left unchanged if already
+// set.
+func (b *Builder) From(node *parser.Node) (string, error) {
+ children := SplitChildren(node, command.From)
+ switch {
+ case len(children) == 0:
+ return "", ErrNoFROM
+ case len(children) > 1:
+ return "", fmt.Errorf("multiple FROM statements are not supported")
+ default:
+ step := b.Step()
+ if err := step.Resolve(children[0]); err != nil {
+ return "", err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return "", err
+ }
+ return b.RunConfig.Image, nil
+ }
+}
+
+// FromImage updates the builder to use the provided image (resetting RunConfig
+// and recording the image environment), and updates the node with any ONBUILD
+// statements extracted from the parent image.
+func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
+ SplitChildren(node, command.From)
+
+ b.RunConfig = *image.Config
+ b.Env = b.RunConfig.Env
+ b.RunConfig.Env = nil
+
+ // Check to see if we have a default PATH, note that windows won't
+ // have one as its set by HCS
+ if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") {
+ b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv)
+ }
+
+ // Join the image onbuild statements into node
+ if image.Config == nil || len(image.Config.OnBuild) == 0 {
+ return nil
+ }
+ extra, err := ParseDockerfile(bytes.NewBufferString(strings.Join(image.Config.OnBuild, "\n")))
+ if err != nil {
+ return err
+ }
+ for _, child := range extra.Children {
+ switch strings.ToUpper(child.Value) {
+ case "ONBUILD":
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+ case "MAINTAINER", "FROM":
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", child.Value)
+ }
+ }
+ node.Children = append(extra.Children, node.Children...)
+ // Since we've processed the OnBuild statements, clear them from the runconfig state.
+ b.RunConfig.OnBuild = nil
+ return nil
+}
+
+// SplitChildren removes any children with the provided value from node
+// and returns them as an array. node.Children is updated.
+func SplitChildren(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var children []*parser.Node
+ for _, child := range node.Children {
+ if child.Value == value {
+ split = append(split, child)
+ } else {
+ children = append(children, child)
+ }
+ }
+ node.Children = children
+ return split
+}
+
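+// SplitBy breaks node.Children into consecutive groups, starting a new group
+// at each child whose Value equals the provided value; any children before
+// the first match form their own group. Each group is returned as a shallow
+// copy of node holding only that group's children. NewStages uses this with
+// command.From to separate the stages of a Dockerfile.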
+func SplitBy(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var current *parser.Node
+ for _, child := range node.Children {
+ if current == nil || child.Value == value {
+ copied := *node
+ current = &copied
+ current.Children = nil
+ current.Next = nil
+ split = append(split, current)
+ }
+ current.Children = append(current.Children, child)
+ }
+ return split
+}
+
+// StepFunc is invoked with the result of a resolved step.
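+// The arguments are the active builder, the resolved step arguments, the
+// step attributes, the step flags, and the original source line, mirroring
+// the fields of Step (see Builder.Run).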
+type StepFunc func(*Builder, []string, map[string]bool, []string, string) error
+
+var evaluateTable = map[string]StepFunc{
+ command.Env: env,
+ command.Label: label,
+ command.Maintainer: maintainer,
+ command.Add: add,
+ command.Copy: dispatchCopy, // copy() is a go builtin
+ command.From: from,
+ command.Onbuild: onbuild,
+ command.Workdir: workdir,
+ command.Run: run,
+ command.Cmd: cmd,
+ command.Entrypoint: entrypoint,
+ command.Expose: expose,
+ command.Volume: volume,
+ command.User: user,
+ command.StopSignal: stopSignal,
+ command.Arg: arg,
+ command.Healthcheck: healthcheck,
+ command.Shell: shell,
+}
+
+// builtinAllowedBuildArgs is the list of built-in allowed build args.
+var builtinAllowedBuildArgs = map[string]bool{
+ "HTTP_PROXY": true,
+ "http_proxy": true,
+ "HTTPS_PROXY": true,
+ "https_proxy": true,
+ "FTP_PROXY": true,
+ "ftp_proxy": true,
+ "NO_PROXY": true,
+ "no_proxy": true,
+}
+
+// ParseDockerignore returns a list of the excludes in the .dockerignore file.
+// Extracted from fsouza/go-dockerclient.
+func ParseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ return strings.Split(string(ignore), "\n"), nil
+}
+
+// ExportEnv creates an export statement for a shell that contains all of the
+// provided environment.
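+//
+// For example (illustrative):
+//
+//	ExportEnv([]string{"A=b c", "X=$y"}) // export "A=b c" "X=\$y"; 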
+func ExportEnv(env []string) string {
+ if len(env) == 0 {
+ return ""
+ }
+ out := "export"
+ for _, e := range env {
+ if len(e) == 0 {
+ continue
+ }
+ out += " " + BashQuote(e)
+ }
+ return out + "; "
+}
+
+// BashQuote escapes the provided string and surrounds it with double quotes.
+// TODO: verify that these are all we have to escape.
+func BashQuote(env string) string {
+ out := []rune{'"'}
+ for _, r := range env {
+ switch r {
+ case '$', '\\', '"':
+ out = append(out, '\\', r)
+ default:
+ out = append(out, r)
+ }
+ }
+ out = append(out, '"')
+ return string(out)
+}
diff --git a/vendor/github.com/openshift/imagebuilder/constants.go b/vendor/github.com/openshift/imagebuilder/constants.go
new file mode 100644
index 000000000..86cd2e5e2
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/constants.go
@@ -0,0 +1,13 @@
+package imagebuilder
+
+const (
+ // in docker/system
+ NoBaseImageSpecifier = "scratch"
+
+ // not yet part of our import
+ commandArg = "arg"
+ commandStopSignal = "stopsignal"
+
+ // in docker/system
+ defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+)
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
new file mode 100644
index 000000000..afa04bb89
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -0,0 +1,570 @@
+package imagebuilder
+
+// This file contains the dispatchers for each command. Note that
+// `nullDispatch` is not actually a command, but support for commands we parse
+// but do nothing with.
+//
+// See evaluator.go for a higher level discussion of the whole evaluator
+// package.
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/openshift/imagebuilder/signal"
+ "github.com/openshift/imagebuilder/strslice"
+)
+
+var (
+	obRegex = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
+)
+
+// dispatch with no layer / parsing. This is effectively not a command.
+func nullDispatch(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ return nil
+}
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar, and also makes interpolation
+// in the Dockerfile available from the next statement on via ${foo}.
+//
+func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("ENV")
+ }
+
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("ENV")
+ }
+
+ // TODO/FIXME/NOT USED
+ // Just here to show how to use the builder flags stuff within the
+ // context of a builder command. Will remove once we actually add
+ // a builder command to something!
+ /*
+ flBool1 := b.flags.AddBool("bool1", false)
+ flStr1 := b.flags.AddString("str1", "HI")
+
+ if err := b.flags.Parse(); err != nil {
+ return err
+ }
+
+ fmt.Printf("Bool1:%v\n", flBool1)
+ fmt.Printf("Str1:%v\n", flStr1)
+ */
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+		newVar := args[j] + "=" + args[j+1]
+ gotOne := false
+ for i, envVar := range b.RunConfig.Env {
+ envParts := strings.SplitN(envVar, "=", 2)
+ if envParts[0] == args[j] {
+ b.RunConfig.Env[i] = newVar
+ b.Env = append([]string{newVar}, b.Env...)
+ gotOne = true
+ break
+ }
+ }
+ if !gotOne {
+ b.RunConfig.Env = append(b.RunConfig.Env, newVar)
+ b.Env = append([]string{newVar}, b.Env...)
+ }
+ j++
+ }
+
+ return nil
+}
+
+// MAINTAINER some text <maybe@an.email.address>
+//
+// Sets the maintainer metadata.
+func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("MAINTAINER")
+ }
+ b.Author = args[0]
+ return nil
+}
+
+// LABEL some json data describing the image
+//
+// Sets the label foo to bar.
+//
+func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("LABEL")
+ }
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("LABEL")
+ }
+
+ if b.RunConfig.Labels == nil {
+ b.RunConfig.Labels = map[string]string{}
+ }
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+ b.RunConfig.Labels[args[j]] = args[j+1]
+ j++
+ }
+ return nil
+}
+
+// ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and remote URL (git, http) handling
+// exists here. If you do not wish to have this automatic handling, use COPY.
+//
+func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+		return errAtLeastTwoArguments("ADD")
+ }
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true})
+ return nil
+}
+
+// COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
+//
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+		return errAtLeastTwoArguments("COPY")
+ }
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ var from string
+ if len(flagArgs) > 0 {
+ for _, arg := range flagArgs {
+ switch {
+ case strings.HasPrefix(arg, "--from="):
+ from = strings.TrimPrefix(arg, "--from=")
+ default:
+ return fmt.Errorf("COPY only supports the --from=<image|stage> flag")
+ }
+ }
+ }
+ b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false})
+ return nil
+}
+
+// FROM imagename
+//
+// This sets the image the dockerfile will build on top of.
+//
+func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ switch {
+ case len(args) == 1:
+ case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0:
+
+ default:
+ return fmt.Errorf("FROM requires either one argument, or three: FROM <source> [as <name>]")
+ }
+
+ name := args[0]
+ // Windows cannot support a container with no base image.
+ if name == NoBaseImageSpecifier {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("Windows does not support FROM scratch")
+ }
+ }
+ b.RunConfig.Image = name
+ // TODO: handle onbuild
+ return nil
+}
+
+// ONBUILD RUN echo yo
+//
+// ONBUILD triggers run when the image is used in a FROM statement.
+//
+// ONBUILD handling has a lot of special-case functionality; the heading in
+// evaluator.go and comments around dispatch() in the same file explain the
+// special cases. Search for 'OnBuild' in internals.go for additional special
+// cases.
+//
+func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("ONBUILD")
+ }
+
+ triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
+ switch triggerInstruction {
+ case "ONBUILD":
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+ case "MAINTAINER", "FROM":
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+ }
+
+	original = obRegex.ReplaceAllString(original, "")
+
+ b.RunConfig.OnBuild = append(b.RunConfig.OnBuild, original)
+ return nil
+}
+
+// WORKDIR /tmp
+//
+// Set the working directory for future RUN/CMD/etc statements.
+//
+func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("WORKDIR")
+ }
+
+ // This is from the Dockerfile and will not necessarily be in platform
+ // specific semantics, hence ensure it is converted.
+ workdir := filepath.FromSlash(args[0])
+
+ if !filepath.IsAbs(workdir) {
+ current := filepath.FromSlash(b.RunConfig.WorkingDir)
+ workdir = filepath.Join(string(os.PathSeparator), current, workdir)
+ }
+
+ b.RunConfig.WorkingDir = workdir
+ return nil
+}
+
+// RUN some command yo
+//
+// run a command and commit the image. Args are automatically prepended with
+// 'sh -c' under Linux or 'cmd /S /C' under Windows, in the event there is
+// only one argument. The difference in processing:
+//
+// RUN echo hi # sh -c echo hi (Linux)
+// RUN echo hi # cmd /S /C echo hi (Windows)
+// RUN [ "echo", "hi" ] # echo hi
+//
+func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if b.RunConfig.Image == "" {
+ return fmt.Errorf("Please provide a source image with `from` prior to run")
+ }
+
+ args = handleJSONArgs(args, attributes)
+
+ run := Run{Args: args}
+
+ if !attributes["json"] {
+ run.Shell = true
+ }
+ b.PendingRuns = append(b.PendingRuns, run)
+ return nil
+}
+
+// CMD foo
+//
+// Set the default command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ cmdSlice := handleJSONArgs(args, attributes)
+
+ if !attributes["json"] {
+ if runtime.GOOS != "windows" {
+ cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+ } else {
+ cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
+ }
+ }
+
+ b.RunConfig.Cmd = strslice.StrSlice(cmdSlice)
+ if len(args) != 0 {
+ b.CmdSet = true
+ }
+ return nil
+}
+
+// ENTRYPOINT /usr/sbin/nginx
+//
+// Set the entrypoint (which defaults to sh -c on Linux, or cmd /S /C on Windows) to
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
+//
+// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
+// is initialized at NewBuilder time instead of through argument parsing.
+//
+func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ parsed := handleJSONArgs(args, attributes)
+
+ switch {
+ case attributes["json"]:
+ // ENTRYPOINT ["echo", "hi"]
+ b.RunConfig.Entrypoint = strslice.StrSlice(parsed)
+ case len(parsed) == 0:
+ // ENTRYPOINT []
+ b.RunConfig.Entrypoint = nil
+ default:
+ // ENTRYPOINT echo hi
+ if runtime.GOOS != "windows" {
+ b.RunConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]}
+ } else {
+ b.RunConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]}
+ }
+ }
+
+	// When setting the entrypoint, if a CMD was not explicitly set,
+	// set the command to nil.
+ if !b.CmdSet {
+ b.RunConfig.Cmd = nil
+ }
+ return nil
+}
+
+// EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// b.RunConfig.ExposedPorts for runconfig.
+//
+func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("EXPOSE")
+ }
+
+ if b.RunConfig.ExposedPorts == nil {
+ b.RunConfig.ExposedPorts = make(map[docker.Port]struct{})
+ }
+
+ existing := map[string]struct{}{}
+ for k := range b.RunConfig.ExposedPorts {
+ existing[k.Port()] = struct{}{}
+ }
+
+ for _, port := range args {
+ dp := docker.Port(port)
+ if _, exists := existing[dp.Port()]; !exists {
+ b.RunConfig.ExposedPorts[docker.Port(fmt.Sprintf("%s/%s", dp.Port(), dp.Proto()))] = struct{}{}
+ }
+ }
+ return nil
+}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("USER")
+ }
+
+ b.RunConfig.User = args[0]
+ return nil
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+//
+func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("VOLUME")
+ }
+
+ if b.RunConfig.Volumes == nil {
+ b.RunConfig.Volumes = map[string]struct{}{}
+ }
+ for _, v := range args {
+ v = strings.TrimSpace(v)
+ if v == "" {
+			return fmt.Errorf("Volume specified cannot be an empty string")
+ }
+ b.RunConfig.Volumes[v] = struct{}{}
+ b.PendingVolumes.Add(v)
+ }
+ return nil
+}
+
+// STOPSIGNAL signal
+//
+// Set the signal that will be used to kill the container.
+func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("STOPSIGNAL")
+ }
+
+ sig := args[0]
+ if err := signal.CheckSignal(sig); err != nil {
+ return err
+ }
+
+ b.RunConfig.StopSignal = sig
+ return nil
+}
+
+// HEALTHCHECK foo
+//
+// Set the default healthcheck command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("HEALTHCHECK")
+ }
+ typ := strings.ToUpper(args[0])
+ args = args[1:]
+ if typ == "NONE" {
+ if len(args) != 0 {
+ return fmt.Errorf("HEALTHCHECK NONE takes no arguments")
+ }
+ test := strslice.StrSlice{typ}
+ b.RunConfig.Healthcheck = &docker.HealthConfig{
+ Test: test,
+ }
+ } else {
+ if b.RunConfig.Healthcheck != nil {
+ oldCmd := b.RunConfig.Healthcheck.Test
+ if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
+ b.Warnings = append(b.Warnings, fmt.Sprintf("Note: overriding previous HEALTHCHECK: %v\n", oldCmd))
+ }
+ }
+
+ healthcheck := docker.HealthConfig{}
+
+ flags := flag.NewFlagSet("", flag.ContinueOnError)
+ flags.String("interval", "", "")
+ flags.String("timeout", "", "")
+ flRetries := flags.String("retries", "", "")
+
+ if err := flags.Parse(flagArgs); err != nil {
+ return err
+ }
+
+ switch typ {
+ case "CMD":
+ cmdSlice := handleJSONArgs(args, attributes)
+ if len(cmdSlice) == 0 {
+ return fmt.Errorf("Missing command after HEALTHCHECK CMD")
+ }
+
+ if !attributes["json"] {
+ typ = "CMD-SHELL"
+ }
+
+ healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
+ default:
+ return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
+ }
+
+ interval, err := parseOptInterval(flags.Lookup("interval"))
+ if err != nil {
+ return err
+ }
+ healthcheck.Interval = interval
+
+ timeout, err := parseOptInterval(flags.Lookup("timeout"))
+ if err != nil {
+ return err
+ }
+ healthcheck.Timeout = timeout
+
+ if *flRetries != "" {
+ retries, err := strconv.ParseInt(*flRetries, 10, 32)
+ if err != nil {
+ return err
+ }
+ if retries < 1 {
+ return fmt.Errorf("--retries must be at least 1 (not %d)", retries)
+ }
+ healthcheck.Retries = int(retries)
+ } else {
+ healthcheck.Retries = 0
+ }
+ b.RunConfig.Healthcheck = &healthcheck
+ }
+
+ return nil
+}
+
+// ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to the builder using the --build-arg flag for expansion/substitution or passing to 'run'.
+// The Dockerfile author may optionally set a default value for this variable.
+func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return fmt.Errorf("ARG requires exactly one argument definition")
+ }
+
+ var (
+ name string
+ value string
+ hasDefault bool
+ )
+
+ arg := args[0]
+ // 'arg' can just be a name or name-value pair. Note that this is different
+ // from 'env' that handles the split of name and value at the parser level.
+ // The reason for doing it differently for 'arg' is that we support just
+ // defining an arg and not assign it a value (while 'env' always expects a
+ // name-value pair). If possible, it will be good to harmonize the two.
+ if strings.Contains(arg, "=") {
+ parts := strings.SplitN(arg, "=", 2)
+ name = parts[0]
+ value = parts[1]
+ hasDefault = true
+ } else {
+ name = arg
+ hasDefault = false
+ }
+	// Add the arg to the allowed list of build-time args from this step on.
+ b.AllowedArgs[name] = true
+
+	// If there is a default value associated with this arg then add it to
+	// b.Args if one was not already passed to the builder. The args passed
+	// to the builder override the default value of 'arg'.
+ if _, ok := b.Args[name]; !ok && hasDefault {
+ b.Args[name] = value
+ }
+
+ return nil
+}
+
+// SHELL powershell -command
+//
+// Set the non-default shell to use.
+func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ shellSlice := handleJSONArgs(args, attributes)
+ switch {
+ case len(shellSlice) == 0:
+ // SHELL []
+ return errAtLeastOneArgument("SHELL")
+ case attributes["json"]:
+ // SHELL ["powershell", "-command"]
+ b.RunConfig.Shell = strslice.StrSlice(shellSlice)
+ default:
+ // SHELL powershell -command - not JSON
+ return errNotJSON("SHELL")
+ }
+ return nil
+}
+
+func errAtLeastOneArgument(command string) error {
+ return fmt.Errorf("%s requires at least one argument", command)
+}
+
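+func errAtLeastTwoArguments(command string) error {
+	return fmt.Errorf("%s requires at least two arguments", command)
+}
+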
+func errExactlyOneArgument(command string) error {
+ return fmt.Errorf("%s requires exactly one argument", command)
+}
+
+func errTooManyArguments(command string) error {
+ return fmt.Errorf("Bad input to %s, too many arguments", command)
+}
+
+func errNotJSON(command string) error {
+ return fmt.Errorf("%s requires the arguments to be in JSON form", command)
+}
diff --git a/vendor/github.com/openshift/imagebuilder/doc.go b/vendor/github.com/openshift/imagebuilder/doc.go
new file mode 100644
index 000000000..97028ffc8
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/doc.go
@@ -0,0 +1,6 @@
+// Package imagebuilder uses code from github.com/docker/docker/builder/* to implement
+// a Docker builder that does not create individual layers, but instead creates a
+// single layer.
+//
+// TODO: full windows support
+package imagebuilder
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
new file mode 100644
index 000000000..83263127e
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -0,0 +1,160 @@
+package imagebuilder
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/builder/dockerfile/parser"
+)
+
+// ParseDockerfile parses the provided stream as a canonical Dockerfile
+func ParseDockerfile(r io.Reader) (*parser.Node, error) {
+ result, err := parser.Parse(r)
+ if err != nil {
+ return nil, err
+ }
+ return result.AST, nil
+}
+
+// Environment variable interpolation will happen on these statements only.
+var replaceEnvAllowed = map[string]bool{
+ command.Env: true,
+ command.Label: true,
+ command.Add: true,
+ command.Copy: true,
+ command.Workdir: true,
+ command.Expose: true,
+ command.Volume: true,
+ command.User: true,
+ commandStopSignal: true,
+ commandArg: true,
+}
+
+// Certain commands are allowed to have their args split into more
+// words after env var replacements. Meaning:
+// ENV foo="123 456"
+// EXPOSE $foo
+// should result in the same thing as:
+// EXPOSE 123 456
+// and not treat "123 456" as a single word.
+// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
+// Quotes will cause it to still be treated as single word.
+var allowWordExpansion = map[string]bool{
+ command.Expose: true,
+}
+
+// Step represents the input Env and the output command after all
+// post processing of the command arguments is done.
+type Step struct {
+ Env []string
+
+ Command string
+ Args []string
+ Flags []string
+ Attrs map[string]bool
+ Message string
+ Original string
+}
+
+// Resolve transforms a parsed Dockerfile line into a command to execute,
+// resolving any arguments.
+//
+// Almost all nodes will have this structure:
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each
+// node comes from parser.Node.Next. This forms a "line" with a statement and
+// arguments and we process them in this normalized form by hitting
+// evaluateTable with the leaf nodes of the command and the Builder object.
+//
+// ONBUILD is a special case; in this case the parser will emit:
+// Child[Node, Child[Node, Node...]] where the first node is the literal
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
+// deal with that, at least until it becomes more of a general concern with new
+// features.
+func (b *Step) Resolve(ast *parser.Node) error {
+ cmd := ast.Value
+ upperCasedCmd := strings.ToUpper(cmd)
+
+	// Ensure the user is given a decent error message if the platform
+	// on which the daemon is running does not support a builder command.
+ if err := platformSupports(strings.ToLower(cmd)); err != nil {
+ return err
+ }
+
+ attrs := ast.Attributes
+ original := ast.Original
+ flags := ast.Flags
+ strList := []string{}
+ msg := upperCasedCmd
+
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
+ if cmd == "onbuild" {
+ if ast.Next == nil {
+ return fmt.Errorf("ONBUILD requires at least one argument")
+ }
+ ast = ast.Next.Children[0]
+ strList = append(strList, ast.Value)
+ msg += " " + ast.Value
+
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
+ }
+
+	// Count the number of nodes that we are going to traverse first so we
+	// can pre-create the argument and message arrays. This speeds up the
+	// allocation of those lists a lot when a command has many arguments.
+ cursor := ast
+ var n int
+ for cursor.Next != nil {
+ cursor = cursor.Next
+ n++
+ }
+ msgList := make([]string, n)
+
+ var i int
+ envs := b.Env
+ for ast.Next != nil {
+ ast = ast.Next
+		str := ast.Value
+ if replaceEnvAllowed[cmd] {
+ var err error
+ var words []string
+
+ if allowWordExpansion[cmd] {
+ words, err = ProcessWords(str, envs)
+ if err != nil {
+ return err
+ }
+ strList = append(strList, words...)
+ } else {
+ str, err = ProcessWord(str, envs)
+ if err != nil {
+ return err
+ }
+ strList = append(strList, str)
+ }
+ } else {
+ strList = append(strList, str)
+ }
+ msgList[i] = ast.Value
+ i++
+ }
+
+ msg += " " + strings.Join(msgList, " ")
+
+ b.Message = msg
+ b.Command = cmd
+ b.Args = strList
+ b.Original = original
+ b.Attrs = attrs
+ b.Flags = flags
+ return nil
+}
diff --git a/vendor/github.com/openshift/imagebuilder/internals.go b/vendor/github.com/openshift/imagebuilder/internals.go
new file mode 100644
index 000000000..9a8005bfc
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/internals.go
@@ -0,0 +1,83 @@
+package imagebuilder
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// hasEnvName returns true if the provided environment contains the named ENV var.
+func hasEnvName(env []string, name string) bool {
+ for _, e := range env {
+ if strings.HasPrefix(e, name+"=") {
+ return true
+ }
+ }
+ return false
+}
+
+// platformSupports is a short-term function to give users a quality error
+// message if a Dockerfile uses a command not supported on the platform.
+func platformSupports(command string) error {
+ if runtime.GOOS != "windows" {
+ return nil
+ }
+ switch command {
+ case "expose", "user", "stopsignal", "arg":
+ return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
+ }
+ return nil
+}
+
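+// handleJSONArgs returns args unchanged when the instruction used the JSON
+// (exec) form, and otherwise joins them into a single shell string. For
+// example, RUN ["echo", "hi"] yields ["echo", "hi"], while RUN echo hi
+// yields ["echo hi"].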
+func handleJSONArgs(args []string, attributes map[string]bool) []string {
+ if len(args) == 0 {
+ return []string{}
+ }
+
+ if attributes != nil && attributes["json"] {
+ return args
+ }
+
+ // literal string command, not an exec array
+ return []string{strings.Join(args, " ")}
+}
+
+// makeAbsolute ensures that the provided path is absolute.
+func makeAbsolute(dest, workingDir string) string {
+	// Twiddle the destination when it's a relative path - meaning, make it
+	// relative to the WORKDIR.
+ if !filepath.IsAbs(dest) {
+ hasSlash := strings.HasSuffix(dest, string(os.PathSeparator)) || strings.HasSuffix(dest, string(os.PathSeparator)+".")
+ dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
+
+ // Make sure we preserve any trailing slash
+ if hasSlash {
+ dest += string(os.PathSeparator)
+ }
+ }
+ return dest
+}
+
+// parseOptInterval returns the duration of f.Value, or 0 if
+// empty. An error is reported if the value is given and is not positive.
+func parseOptInterval(f *flag.Flag) (time.Duration, error) {
+ if f == nil {
+ return 0, fmt.Errorf("No flag defined")
+ }
+ s := f.Value.String()
+ if s == "" {
+ return 0, nil
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return 0, err
+ }
+ if d <= 0 {
+ return 0, fmt.Errorf("Interval %#v must be positive", f.Name)
+ }
+ return d, nil
+}
diff --git a/vendor/github.com/openshift/imagebuilder/shell_parser.go b/vendor/github.com/openshift/imagebuilder/shell_parser.go
new file mode 100644
index 000000000..65f1db6dc
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/shell_parser.go
@@ -0,0 +1,314 @@
+package imagebuilder
+
+// This will take a single word and an array of env variables and
+// process all quotes (" and ') as well as $xxx and ${xxx} env variable
+// tokens. It tries to mimic bash shell word processing.
+// It doesn't support all flavors of ${xx:...} formats, but new ones can
+// be added by adding code to the "special ${} format processing" section.
+
+import (
+ "fmt"
+ "strings"
+ "text/scanner"
+ "unicode"
+)
+
+type shellWord struct {
+ word string
+ scanner scanner.Scanner
+ envs []string
+ pos int
+}
+
+// ProcessWord will use the 'env' list of environment variables,
+// and replace any env var references in 'word'.
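+//
+// For example, with env = []string{"FOO=bar"} (illustrative):
+//
+//	ProcessWord("$FOO/baz", env) // "bar/baz", nil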
+func ProcessWord(word string, env []string) (string, error) {
+ sw := &shellWord{
+ word: word,
+ envs: env,
+ pos: 0,
+ }
+ sw.scanner.Init(strings.NewReader(word))
+ word, _, err := sw.process()
+ return word, err
+}
+
+// ProcessWords will use the 'env' list of environment variables,
+// and replace any env var references in 'word'; it will also
+// return a slice of strings which represents the 'word'
+// split up based on spaces - taking into account quotes. Note that
+// this splitting is done **after** the env var substitutions are done.
+// Note, each word is trimmed to remove leading and trailing spaces (unless
+// it is quoted), but ProcessWord retains spaces between words.
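+//
+// For example, with env = []string{"FOO=bar"} (illustrative):
+//
+//	ProcessWords(`$FOO "a b"`, env) // []string{"bar", "a b"}, nil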
+func ProcessWords(word string, env []string) ([]string, error) {
+ sw := &shellWord{
+ word: word,
+ envs: env,
+ pos: 0,
+ }
+ sw.scanner.Init(strings.NewReader(word))
+ _, words, err := sw.process()
+ return words, err
+}
+
+func (sw *shellWord) process() (string, []string, error) {
+ return sw.processStopOn(scanner.EOF)
+}
+
+type wordsStruct struct {
+ word string
+ words []string
+ inWord bool
+}
+
+func (w *wordsStruct) addChar(ch rune) {
+ if unicode.IsSpace(ch) && w.inWord {
+ if len(w.word) != 0 {
+ w.words = append(w.words, w.word)
+ w.word = ""
+ w.inWord = false
+ }
+ } else if !unicode.IsSpace(ch) {
+ w.addRawChar(ch)
+ }
+}
+
+func (w *wordsStruct) addRawChar(ch rune) {
+ w.word += string(ch)
+ w.inWord = true
+}
+
+func (w *wordsStruct) addString(str string) {
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(str))
+ for scan.Peek() != scanner.EOF {
+ w.addChar(scan.Next())
+ }
+}
+
+func (w *wordsStruct) addRawString(str string) {
+ w.word += str
+ w.inWord = true
+}
+
+func (w *wordsStruct) getWords() []string {
+ if len(w.word) > 0 {
+ w.words = append(w.words, w.word)
+
+ // Just in case we're called again by mistake
+ w.word = ""
+ w.inWord = false
+ }
+ return w.words
+}
+
+// processStopOn processes the word, starting at 'pos', and stops when it
+// reaches the end of the word or the 'stopChar' character.
+func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
+ var result string
+ var words wordsStruct
+
+ var charFuncMapping = map[rune]func() (string, error){
+ '\'': sw.processSingleQuote,
+ '"': sw.processDoubleQuote,
+ '$': sw.processDollar,
+ }
+
+ for sw.scanner.Peek() != scanner.EOF {
+ ch := sw.scanner.Peek()
+
+ if stopChar != scanner.EOF && ch == stopChar {
+ sw.scanner.Next()
+ break
+ }
+ if fn, ok := charFuncMapping[ch]; ok {
+ // Call special processing func for certain chars
+ tmp, err := fn()
+ if err != nil {
+ return "", []string{}, err
+ }
+ result += tmp
+
+ if ch == rune('$') {
+ words.addString(tmp)
+ } else {
+ words.addRawString(tmp)
+ }
+ } else {
+ // Not special, just add it to the result
+ ch = sw.scanner.Next()
+
+ if ch == '\\' {
+ // '\' escapes, except end of line
+
+ ch = sw.scanner.Next()
+
+ if ch == scanner.EOF {
+ break
+ }
+
+ words.addRawChar(ch)
+ } else {
+ words.addChar(ch)
+ }
+
+ result += string(ch)
+ }
+ }
+
+ return result, words.getWords(), nil
+}
+
+func (sw *shellWord) processSingleQuote() (string, error) {
+ // All chars between single quotes are taken as-is
+ // Note, you can't escape '
+ var result string
+
+ sw.scanner.Next()
+
+ for {
+ ch := sw.scanner.Next()
+ if ch == '\'' || ch == scanner.EOF {
+ break
+ }
+ result += string(ch)
+ }
+
+ return result, nil
+}
+
+func (sw *shellWord) processDoubleQuote() (string, error) {
+ // All chars up to the next " are taken as-is, even ', except any $ chars
+ // But you can escape " with a \
+ var result string
+
+ sw.scanner.Next()
+
+ for sw.scanner.Peek() != scanner.EOF {
+ ch := sw.scanner.Peek()
+ if ch == '"' {
+ sw.scanner.Next()
+ break
+ }
+ if ch == '$' {
+ tmp, err := sw.processDollar()
+ if err != nil {
+ return "", err
+ }
+ result += tmp
+ } else {
+ ch = sw.scanner.Next()
+ if ch == '\\' {
+ chNext := sw.scanner.Peek()
+
+ if chNext == scanner.EOF {
+ // Ignore \ at end of word
+ continue
+ }
+
+ if chNext == '"' || chNext == '$' {
+ // \" and \$ can be escaped, all other \'s are left as-is
+ ch = sw.scanner.Next()
+ }
+ }
+ result += string(ch)
+ }
+ }
+
+ return result, nil
+}
+
+func (sw *shellWord) processDollar() (string, error) {
+ sw.scanner.Next()
+ ch := sw.scanner.Peek()
+ if ch == '{' {
+ sw.scanner.Next()
+ name := sw.processName()
+ ch = sw.scanner.Peek()
+ if ch == '}' {
+ // Normal ${xx} case
+ sw.scanner.Next()
+ return sw.getEnv(name), nil
+ }
+ if ch == ':' {
+ // Special ${xx:...} format processing
+ // Yes it allows for recursive $'s in the ... spot
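+			// For example (illustrative): with FOO unset, ${FOO:-def}
+			// yields "def" and ${FOO:+alt} yields ""; with FOO=x,
+			// ${FOO:-def} yields "x" and ${FOO:+alt} yields "alt".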
+
+ sw.scanner.Next() // skip over :
+ modifier := sw.scanner.Next()
+
+ word, _, err := sw.processStopOn('}')
+ if err != nil {
+ return "", err
+ }
+
+ // Grab the current value of the variable in question so we
+ // can use it to determine what to do based on the modifier
+ newValue := sw.getEnv(name)
+
+ switch modifier {
+ case '+':
+ if newValue != "" {
+ newValue = word
+ }
+ return newValue, nil
+
+ case '-':
+ if newValue == "" {
+ newValue = word
+ }
+ return newValue, nil
+
+ default:
+ return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
+ }
+ }
+ return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
+ }
+ // $xxx case
+ name := sw.processName()
+ if name == "" {
+ return "$", nil
+ }
+ return sw.getEnv(name), nil
+}
+
+func (sw *shellWord) processName() string {
+ // Read in a name (alphanumeric or _).
+ // If it starts with a digit, just return that single digit (e.g. for $1).
+ var name string
+
+ for sw.scanner.Peek() != scanner.EOF {
+ ch := sw.scanner.Peek()
+ if len(name) == 0 && unicode.IsDigit(ch) {
+ ch = sw.scanner.Next()
+ return string(ch)
+ }
+ if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
+ break
+ }
+ ch = sw.scanner.Next()
+ name += string(ch)
+ }
+
+ return name
+}
+
+func (sw *shellWord) getEnv(name string) string {
+ for _, env := range sw.envs {
+ i := strings.Index(env, "=")
+ if i < 0 {
+ if name == env {
+ // Should probably never get here, but just in case treat
+ // it like "var" and "var=" are the same
+ return ""
+ }
+ continue
+ }
+ if name != env[:i] {
+ continue
+ }
+ return env[i+1:]
+ }
+ return ""
+}
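The ':+' and ':-' modifier handling in processDollar above is easy to misread, so here is a self-contained sketch of the same substitution rules; getEnvDemo and expandDemo are illustrative helpers, not part of the vendored package.

package main

import (
	"fmt"
	"strings"
)

// getEnvDemo mirrors getEnv above: envs holds "KEY=VALUE" entries.
func getEnvDemo(envs []string, name string) string {
	for _, env := range envs {
		if i := strings.Index(env, "="); i >= 0 && env[:i] == name {
			return env[i+1:]
		}
	}
	return ""
}

// expandDemo mirrors the ':+' and ':-' branches of processDollar above.
func expandDemo(envs []string, name string, modifier rune, word string) (string, error) {
	value := getEnvDemo(envs, name)
	switch modifier {
	case '+': // ${name:+word}: use word only when the variable is non-empty
		if value != "" {
			return word, nil
		}
		return "", nil
	case '-': // ${name:-word}: use word as a default when the variable is empty
		if value == "" {
			return word, nil
		}
		return value, nil
	}
	return "", fmt.Errorf("unsupported modifier %c", modifier)
}

func main() {
	envs := []string{"GOOS=linux"}
	v, _ := expandDemo(envs, "GOOS", '-', "darwin")
	fmt.Println(v) // linux
	v, _ = expandDemo(envs, "GOARCH", '-', "amd64")
	fmt.Println(v) // amd64
}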
diff --git a/vendor/github.com/openshift/imagebuilder/signal/README.md b/vendor/github.com/openshift/imagebuilder/signal/README.md
new file mode 100644
index 000000000..2b237a594
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file
diff --git a/vendor/github.com/openshift/imagebuilder/signal/signal.go b/vendor/github.com/openshift/imagebuilder/signal/signal.go
new file mode 100644
index 000000000..46493965d
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/signal.go
@@ -0,0 +1,25 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// CheckSignal checks whether a string names a valid signal: either a
+// non-zero signal number or a name present in the signal map.
+func CheckSignal(rawSignal string) error {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return nil
+ }
+ if _, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]; !ok {
+ return fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return nil
+}
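For reference, CheckSignal accepts non-zero signal numbers and (optionally SIG-prefixed, case-insensitive) names from SignalMap, defined in signals.go below. A minimal sketch, assuming the package is importable at its upstream path:

package main

import (
	"fmt"

	"github.com/openshift/imagebuilder/signal"
)

func main() {
	fmt.Println(signal.CheckSignal("9"))       // <nil>: non-zero numbers pass
	fmt.Println(signal.CheckSignal("0"))       // error: zero is rejected
	fmt.Println(signal.CheckSignal("sigterm")) // <nil>: upper-cased, SIG prefix stripped
	fmt.Println(signal.CheckSignal("BOGUS"))   // error: not in SignalMap
}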
diff --git a/vendor/github.com/openshift/imagebuilder/signal/signals.go b/vendor/github.com/openshift/imagebuilder/signal/signals.go
new file mode 100644
index 000000000..41d6fbd95
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/signals.go
@@ -0,0 +1,79 @@
+package signal
+
+// SignalMap is a map of supported signals.
+var SignalMap = map[string]struct{}{
+ "ABRT": {},
+ "ALRM": {},
+ "BUS": {},
+ "CHLD": {},
+ "CLD": {},
+ "CONT": {},
+ "FPE": {},
+ "HUP": {},
+ "ILL": {},
+ "INT": {},
+ "IO": {},
+ "IOT": {},
+ "KILL": {},
+ "PIPE": {},
+ "POLL": {},
+ "PROF": {},
+ "PWR": {},
+ "QUIT": {},
+ "SEGV": {},
+ "STKFLT": {},
+ "STOP": {},
+ "SYS": {},
+ "TERM": {},
+ "TRAP": {},
+ "TSTP": {},
+ "TTIN": {},
+ "TTOU": {},
+ "UNUSED": {},
+ "URG": {},
+ "USR1": {},
+ "USR2": {},
+ "VTALRM": {},
+ "WINCH": {},
+ "XCPU": {},
+ "XFSZ": {},
+ "RTMIN": {},
+ "RTMIN+1": {},
+ "RTMIN+2": {},
+ "RTMIN+3": {},
+ "RTMIN+4": {},
+ "RTMIN+5": {},
+ "RTMIN+6": {},
+ "RTMIN+7": {},
+ "RTMIN+8": {},
+ "RTMIN+9": {},
+ "RTMIN+10": {},
+ "RTMIN+11": {},
+ "RTMIN+12": {},
+ "RTMIN+13": {},
+ "RTMIN+14": {},
+ "RTMIN+15": {},
+ "RTMAX-14": {},
+ "RTMAX-13": {},
+ "RTMAX-12": {},
+ "RTMAX-11": {},
+ "RTMAX-10": {},
+ "RTMAX-9": {},
+ "RTMAX-8": {},
+ "RTMAX-7": {},
+ "RTMAX-6": {},
+ "RTMAX-5": {},
+ "RTMAX-4": {},
+ "RTMAX-3": {},
+ "RTMAX-2": {},
+ "RTMAX-1": {},
+ "RTMAX": {},
+
+ "BUG": {},
+ "EMT": {},
+ "INFO": {},
+
+ "BUF": {},
+ "LWP": {},
+ "THR": {},
+}
diff --git a/vendor/github.com/openshift/imagebuilder/strslice/strslice.go b/vendor/github.com/openshift/imagebuilder/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
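Both JSON shapes therefore decode to the same Go value; a minimal sketch, again assuming the upstream import path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/openshift/imagebuilder/strslice"
)

func main() {
	var cmd strslice.StrSlice
	// A bare JSON string decodes to a one-element slice...
	if err := json.Unmarshal([]byte(`"/bin/sh"`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [/bin/sh]
	// ...and a JSON array decodes element-for-element.
	if err := json.Unmarshal([]byte(`["/bin/sh","-c","echo hi"]`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [/bin/sh -c echo hi]
}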
diff --git a/vendor/github.com/projectatomic/buildah/LICENSE b/vendor/github.com/projectatomic/buildah/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/projectatomic/buildah/README.md b/vendor/github.com/projectatomic/buildah/README.md
new file mode 100644
index 000000000..ef430153d
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/README.md
@@ -0,0 +1,79 @@
+![buildah logo](https://cdn.rawgit.com/projectatomic/buildah/master/logos/buildah-logo_large.png)
+
+# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building OCI container images
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/projectatomic/buildah)](https://goreportcard.com/report/github.com/projectatomic/buildah)
+[![Travis](https://travis-ci.org/projectatomic/buildah.svg?branch=master)](https://travis-ci.org/projectatomic/buildah)
+
+Note: this package is in alpha, but is close to being feature-complete.
+
+The Buildah package provides a command line tool that can be used to
+* create a working container, either from scratch or using an image as a starting point
+* create an image, either from a working container or via the instructions in a Dockerfile
+* build images in either the OCI image format or the traditional upstream docker image format
+* mount a working container's root filesystem for manipulation
+* unmount a working container's root filesystem
+* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
+* delete a working container or an image
+
+**[Changelog](CHANGELOG.md)**
+
+**[Installation notes](install.md)**
+
+**[Troubleshooting Guide](troubleshooting.md)**
+
+**[Tutorials](docs/tutorials/README.md)**
+
+## Example
+
+From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
+
+```bash
+$ cat > lighttpd.sh <<"EOF"
+#!/bin/bash -x
+
+ctr1=`buildah from ${1:-fedora}`
+
+## Get all updates and install our minimal httpd server
+buildah run $ctr1 -- dnf update -y
+buildah run $ctr1 -- dnf install -y lighttpd
+
+## Include some buildtime annotations
+buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
+
+## Run our server and expose the port
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1
+buildah config --port 80 $ctr1
+
+## Commit this container to an image name
+buildah commit $ctr1 ${2:-$USER/lighttpd}
+EOF
+
+$ chmod +x lighttpd.sh
+$ sudo ./lighttpd.sh
+```
+
+## Commands
+| Command | Description |
+| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
+| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
+| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
+| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
+| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
+| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
+| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
+| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
+| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
+| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
+| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
+| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
+| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
+| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
+| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
+| [buildah-version(1)](/docs/buildah-version.md)       | Display the Buildah version information.                                                               |
+
+**Future goals include:**
+* more CI tests
+* additional CLI commands (?)
diff --git a/vendor/github.com/projectatomic/buildah/add.go b/vendor/github.com/projectatomic/buildah/add.go
new file mode 100644
index 000000000..4fab5a8d7
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/add.go
@@ -0,0 +1,253 @@
+package buildah
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/libpod/pkg/chrootuser"
+ "github.com/sirupsen/logrus"
+)
+
+// AddAndCopyOptions holds options for add and copy commands.
+type AddAndCopyOptions struct {
+ Chown string
+}
+
+// addURL copies the contents of the source URL to the destination. This is
+// its own function so that deferred closes happen after we're done pulling
+// down each item of potentially many.
+func addURL(destination, srcurl string) error {
+ logrus.Debugf("saving %q to %q", srcurl, destination)
+ resp, err := http.Get(srcurl)
+ if err != nil {
+ return errors.Wrapf(err, "error getting %q", srcurl)
+ }
+ defer resp.Body.Close()
+ f, err := os.Create(destination)
+ if err != nil {
+ return errors.Wrapf(err, "error creating %q", destination)
+ }
+ if last := resp.Header.Get("Last-Modified"); last != "" {
+ if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil {
+ logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2)
+ } else {
+ defer func() {
+ if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil {
+ logrus.Debugf("error setting mtime to Last-Modified time %q: %v", last, err3)
+ }
+ }()
+ }
+ }
+ defer f.Close()
+ n, err := io.Copy(f, resp.Body)
+ if err != nil {
+ return errors.Wrapf(err, "error reading contents for %q", destination)
+ }
+ if resp.ContentLength >= 0 && n != resp.ContentLength {
+ return errors.Errorf("error reading contents for %q: wrong length (%d != %d)", destination, n, resp.ContentLength)
+ }
+ if err := f.Chmod(0600); err != nil {
+ return errors.Wrapf(err, "error setting permissions on %q", destination)
+ }
+ return nil
+}
+
+// Add copies the contents of the specified sources into the container's root
+// filesystem, optionally extracting contents of local files that look like
+// non-empty archives.
+func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err2 := b.Unmount(); err2 != nil {
+ logrus.Errorf("error unmounting container: %v", err2)
+ }
+ }()
+ // Find out which user (and group) the destination should belong to.
+ user, err := b.user(mountPoint, options.Chown)
+ if err != nil {
+ return err
+ }
+ dest := mountPoint
+ if destination != "" && filepath.IsAbs(destination) {
+ dest = filepath.Join(dest, destination)
+ } else {
+ if err = ensureDir(filepath.Join(dest, b.WorkDir()), user, 0755); err != nil {
+ return err
+ }
+ dest = filepath.Join(dest, b.WorkDir(), destination)
+ }
+ // If the destination was explicitly marked as a directory by ending it
+ // with a '/', create it so that we can be sure that it's a directory,
+ // and any files we're copying will be placed in the directory.
+ if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
+ if err = ensureDir(dest, user, 0755); err != nil {
+ return err
+ }
+ }
+ // Make sure the destination's parent directory is usable.
+ if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
+ return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
+ }
+ // Now look at the destination itself.
+ destfi, err := os.Stat(dest)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "couldn't determine what %q is", dest)
+ }
+ destfi = nil
+ }
+ if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
+ return errors.Errorf("destination %q is not a directory", dest)
+ }
+ for _, src := range source {
+ if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
+ // We assume that source is a file, and we're copying
+ // it to the destination. If the destination is
+ // already a directory, create a file inside of it.
+ // Otherwise, the destination is the file to which
+ // we'll save the contents.
+ url, err := url.Parse(src)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing URL %q", src)
+ }
+ d := dest
+ if destfi != nil && destfi.IsDir() {
+ d = filepath.Join(dest, path.Base(url.Path))
+ }
+ if err := addURL(d, src); err != nil {
+ return err
+ }
+ if err := setOwner("", d, user); err != nil {
+ return err
+ }
+ continue
+ }
+
+ glob, err := filepath.Glob(src)
+ if err != nil {
+ return errors.Wrapf(err, "invalid glob %q", src)
+ }
+ if len(glob) == 0 {
+ return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
+ }
+ for _, gsrc := range glob {
+ srcfi, err := os.Stat(gsrc)
+ if err != nil {
+ return errors.Wrapf(err, "error reading %q", gsrc)
+ }
+ if srcfi.IsDir() {
+ // The source is a directory, so copy the contents of
+ // the source directory into the target directory. Try
+ // to create it first, so that if there's a problem,
+ // we'll discover why that won't work.
+ if err = ensureDir(dest, user, 0755); err != nil {
+ return err
+ }
+ logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
+ if err := copyWithTar(gsrc, dest); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", gsrc, dest)
+ }
+ if err := setOwner(gsrc, dest, user); err != nil {
+ return err
+ }
+ continue
+ }
+ if !extract || !archive.IsArchivePath(gsrc) {
+ // This source is a file, and either it's not an
+ // archive, or we don't care whether or not it's an
+ // archive.
+ d := dest
+ if destfi != nil && destfi.IsDir() {
+ d = filepath.Join(dest, filepath.Base(gsrc))
+ }
+ // Copy the file, preserving attributes.
+ logrus.Debugf("copying %q to %q", gsrc, d)
+ if err := copyFileWithTar(gsrc, d); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
+ }
+ if err := setOwner(gsrc, d, user); err != nil {
+ return err
+ }
+ continue
+ }
+ // We're extracting an archive into the destination directory.
+ logrus.Debugf("extracting contents of %q into %q", gsrc, dest)
+ if err := untarPath(gsrc, dest); err != nil {
+ return errors.Wrapf(err, "error extracting %q into %q", gsrc, dest)
+ }
+ }
+ }
+ return nil
+}
+
+// user returns the user (and group) information which the destination should belong to.
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
+ if userspec == "" {
+ userspec = b.User()
+ }
+
+ uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+ return u, err
+}
+
+// setOwner sets the uid and gid owners of a given path.
+func setOwner(src, dest string, user specs.User) error {
+ fid, err := os.Stat(dest)
+ if err != nil {
+ return errors.Wrapf(err, "error reading %q", dest)
+ }
+ if !fid.IsDir() || src == "" {
+ if err := os.Lchown(dest, int(user.UID), int(user.GID)); err != nil {
+ return errors.Wrapf(err, "error setting ownership of %q", dest)
+ }
+ return nil
+ }
+ err = filepath.Walk(src, func(p string, info os.FileInfo, we error) error {
+ relPath, err2 := filepath.Rel(src, p)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error getting relative path of %q to set ownership on destination", p)
+ }
+ if relPath != "." {
+ absPath := filepath.Join(dest, relPath)
+ if err2 := os.Lchown(absPath, int(user.UID), int(user.GID)); err2 != nil {
+ return errors.Wrapf(err2, "error setting ownership of %q", absPath)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return errors.Wrapf(err, "error walking dir %q to set ownership", src)
+ }
+ return nil
+}
+
+// ensureDir creates a directory if it doesn't exist, setting ownership and permissions as passed by user and perm.
+func ensureDir(path string, user specs.User, perm os.FileMode) error {
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ if err := os.MkdirAll(path, perm); err != nil {
+ return errors.Wrapf(err, "error ensuring directory %q exists", path)
+ }
+ if err := os.Chown(path, int(user.UID), int(user.GID)); err != nil {
+ return errors.Wrapf(err, "error setting ownership of %q", path)
+ }
+ }
+ return nil
+}
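A hedged sketch of driving Add: the *buildah.Builder is assumed to come from NewBuilder (see buildah.go below), and the destination, tarball, and URL are illustrative. Note that extract only affects local archive sources; URLs always go through addURL as plain files.

package example

import "github.com/projectatomic/buildah"

// addAssets is a sketch only; b is assumed to be a working container's
// *buildah.Builder obtained elsewhere.
func addAssets(b *buildah.Builder) error {
	opts := buildah.AddAndCopyOptions{Chown: "1000:1000"}
	// With extract=true the local tarball is unpacked into /srv/app/;
	// the URL source is copied in as a plain file.
	return b.Add("/srv/app/", true, opts, "site.tar.gz", "https://example.com/index.html")
}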
diff --git a/vendor/github.com/projectatomic/buildah/buildah.go b/vendor/github.com/projectatomic/buildah/buildah.go
new file mode 100644
index 000000000..9b55dc320
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/buildah.go
@@ -0,0 +1,359 @@
+package buildah
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah/docker"
+)
+
+const (
+ // Package is the name of this package, used in help output and to
+ // identify working containers.
+ Package = "buildah"
+ // Version for the Package. Bump version in contrib/rpm/buildah.spec
+ // too.
+ Version = "0.16"
+ // The value we use to identify what type of information, currently a
+ // serialized Builder structure, we are using as per-container state.
+ // This should only be changed when we make incompatible changes to
+ // that data structure, as it's used to distinguish containers which
+ // are "ours" from ones that aren't.
+ containerType = Package + " 0.0.1"
+ // The file in the per-container directory which we use to store our
+ // per-container state. If it isn't there, then the container isn't
+ // one of our build containers.
+ stateFile = Package + ".json"
+)
+
+const (
+ // PullIfMissing is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should be pulled from a
+ // registry if a local copy of it is not already present.
+ PullIfMissing = iota
+ // PullAlways is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that a fresh, possibly updated, copy of the image
+ // should be pulled from a registry before the build proceeds.
+ PullAlways
+ // PullNever is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that the source image should not be pulled from a
+ // registry if a local copy of it is not already present.
+ PullNever
+)
+
+// Builder objects are used to represent containers which are being used to
+// build images. They also carry potential updates which will be applied to
+// the image's configuration when the container's contents are used to build an
+// image.
+type Builder struct {
+ store storage.Store
+
+ // Type is used to help identify a build container's metadata. It
+ // should not be modified.
+ Type string `json:"type"`
+ // FromImage is the name of the source image which was used to create
+ // the container, if one was used. It should not be modified.
+ FromImage string `json:"image,omitempty"`
+ // FromImageID is the ID of the source image which was used to create
+ // the container, if one was used. It should not be modified.
+ FromImageID string `json:"image-id"`
+ // Config is the source image's configuration. It should not be
+ // modified.
+ Config []byte `json:"config,omitempty"`
+ // Manifest is the source image's manifest. It should not be modified.
+ Manifest []byte `json:"manifest,omitempty"`
+
+ // Container is the name of the build container. It should not be modified.
+ Container string `json:"container-name,omitempty"`
+ // ContainerID is the ID of the build container. It should not be modified.
+ ContainerID string `json:"container-id,omitempty"`
+ // MountPoint is the last location where the container's root
+ // filesystem was mounted. It should not be modified.
+ MountPoint string `json:"mountpoint,omitempty"`
+ // ProcessLabel is the SELinux process label associated with the container
+ ProcessLabel string `json:"process-label,omitempty"`
+ // MountLabel is the SELinux mount label associated with the container
+ MountLabel string `json:"mount-label,omitempty"`
+
+ // ImageAnnotations is a set of key-value pairs which is stored in the
+ // image's manifest.
+ ImageAnnotations map[string]string `json:"annotations,omitempty"`
+ // ImageCreatedBy is a description of how this container was built.
+ ImageCreatedBy string `json:"created-by,omitempty"`
+
+ // Image metadata and runtime settings, in multiple formats.
+ OCIv1 v1.Image `json:"ociv1,omitempty"`
+ Docker docker.V2Image `json:"docker,omitempty"`
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+ DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
+ CommonBuildOpts *CommonBuildOptions
+}
+
+// BuilderInfo is used to display container information
+type BuilderInfo struct {
+ Type string
+ FromImage string
+ FromImageID string
+ Config string
+ Manifest string
+ Container string
+ ContainerID string
+ MountPoint string
+ ProcessLabel string
+ MountLabel string
+ ImageAnnotations map[string]string
+ ImageCreatedBy string
+ OCIv1 v1.Image
+ Docker docker.V2Image
+ DefaultMountsFilePath string
+}
+
+// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
+// This is used in the inspect command to display Manifest and Config as strings rather than []byte.
+func GetBuildInfo(b *Builder) BuilderInfo {
+ return BuilderInfo{
+ Type: b.Type,
+ FromImage: b.FromImage,
+ FromImageID: b.FromImageID,
+ Config: string(b.Config),
+ Manifest: string(b.Manifest),
+ Container: b.Container,
+ ContainerID: b.ContainerID,
+ MountPoint: b.MountPoint,
+ ProcessLabel: b.ProcessLabel,
+ ImageAnnotations: b.ImageAnnotations,
+ ImageCreatedBy: b.ImageCreatedBy,
+ OCIv1: b.OCIv1,
+ Docker: b.Docker,
+ DefaultMountsFilePath: b.DefaultMountsFilePath,
+ }
+}
+
+// CommonBuildOptions are resources that can be defined by flags for both buildah from and buildah bud
+type CommonBuildOptions struct {
+ // AddHost is the list of extra host-to-IP mappings (host:ip) to add to the container's /etc/hosts file
+ AddHost []string
+ //CgroupParent is the path to cgroups under which the cgroup for the container will be created.
+ CgroupParent string
+ //CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
+ CPUPeriod uint64
+ //CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
+ CPUQuota int64
+ //CPUShares (relative weight)
+ CPUShares uint64
+ //CPUSetCPUs in which to allow execution (0-3, 0,1)
+ CPUSetCPUs string
+ //CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+ CPUSetMems string
+ //Memory limit
+ Memory int64
+ //MemorySwap limit value equal to memory plus swap.
+ MemorySwap int64
+ //SecurityOpts modify the way container security is running
+ LabelOpts []string
+ SeccompProfilePath string
+ ApparmorProfile string
+ //ShmSize is the shared memory size
+ ShmSize string
+ //Ulimit options
+ Ulimit []string
+ //Volumes to bind mount into the container
+ Volumes []string
+}
+
+// BuilderOptions are used to initialize a new Builder.
+type BuilderOptions struct {
+ // FromImage is the name of the image which should be used as the
+ // starting point for the container. It can be set to an empty value
+ // or "scratch" to indicate that the container should not be based on
+ // an image.
+ FromImage string
+ // Container is a desired name for the build container.
+ Container string
+ // PullPolicy decides whether or not we should pull the image that
+ // we're using as a base image. It should be PullIfMissing,
+ // PullAlways, or PullNever.
+ PullPolicy int
+ // Registry is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone can not be resolved to a
+ // reference to a source image. No separator is implicitly added.
+ Registry string
+ // Transport is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone, or the image name and
+ // the registry together, can not be resolved to a reference to a
+ // source image. No separator is implicitly added.
+ Transport string
+ // Mount signals to NewBuilder() that the container should be mounted
+ // immediately.
+ Mount bool
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the reading
+ // of the source image from a registry, if we end up pulling the image.
+ ReportWriter io.Writer
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+ DefaultMountsFilePath string
+ CommonBuildOpts *CommonBuildOptions
+}
+
+// ImportOptions are used to initialize a Builder from an existing container
+// which was created elsewhere.
+type ImportOptions struct {
+ // Container is the name of the build container.
+ Container string
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+}
+
+// ImportFromImageOptions are used to initialize a Builder from an image.
+type ImportFromImageOptions struct {
+ // Image is the name or ID of the image we'd like to examine.
+ Image string
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // github.com/containers/image/types SystemContext to hold information
+ // about which registries we should check for completing image names
+ // that don't include a domain portion.
+ SystemContext *types.SystemContext
+}
+
+// NewBuilder creates a new build container.
+func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+ return newBuilder(ctx, store, options)
+}
+
+// ImportBuilder creates a new build configuration using an already-present
+// container.
+func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
+ return importBuilder(ctx, store, options)
+}
+
+// ImportBuilderFromImage creates a new builder configuration using an image.
+// The returned object can be modified and examined, but it can not be saved
+// or committed because it is not associated with a working container.
+func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
+ return importBuilderFromImage(ctx, store, options)
+}
+
+// OpenBuilder loads information about a build container given its name or ID.
+func OpenBuilder(store storage.Store, container string) (*Builder, error) {
+ cdir, err := store.ContainerDirectory(container)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil {
+ return nil, err
+ }
+ b := &Builder{}
+ err = json.Unmarshal(buildstate, &b)
+ if err != nil {
+ return nil, err
+ }
+ if b.Type != containerType {
+ return nil, errors.Errorf("container is not a %s container", Package)
+ }
+ b.store = store
+ b.fixupConfig()
+ return b, nil
+}
+
+// OpenBuilderByPath loads information about a build container given a
+// path to the container's root filesystem
+func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, err
+ }
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ builderMatchesPath := func(b *Builder, path string) bool {
+ return (b.MountPoint == path)
+ }
+ for _, container := range containers {
+ cdir, err := store.ContainerDirectory(container.ID)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil {
+ return nil, err
+ }
+ b := &Builder{}
+ err = json.Unmarshal(buildstate, &b)
+ if err == nil && b.Type == containerType && builderMatchesPath(b, abs) {
+ b.store = store
+ b.fixupConfig()
+ return b, nil
+ }
+ }
+ return nil, storage.ErrContainerUnknown
+}
+
+// OpenAllBuilders loads all containers which have a state file that we use in
+// their data directory, typically so that they can be listed.
+func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, err
+ }
+ for _, container := range containers {
+ cdir, err := store.ContainerDirectory(container.ID)
+ if err != nil {
+ return nil, err
+ }
+ buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
+ if err != nil && os.IsNotExist(err) {
+ continue
+ }
+ b := &Builder{}
+ err = json.Unmarshal(buildstate, &b)
+ if err == nil && b.Type == containerType {
+ b.store = store
+ b.fixupConfig()
+ builders = append(builders, b)
+ }
+ }
+ return builders, nil
+}
+
+// Save saves the builder's current state to the build container's metadata.
+// This should not need to be called directly, as other methods of the Builder
+// object take care of saving their state.
+func (b *Builder) Save() error {
+ buildstate, err := json.Marshal(b)
+ if err != nil {
+ return err
+ }
+ cdir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return err
+ }
+ return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600)
+}
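Putting the options together, a hedged sketch of creating a working container and persisting its state; the containers/storage default store options are an assumption, and the base image is illustrative.

package example

import (
	"context"

	"github.com/containers/storage"
	"github.com/projectatomic/buildah"
)

// newWorkingContainer is a sketch; storage.DefaultStoreOptions is assumed
// to be the desired storage configuration.
func newWorkingContainer(ctx context.Context) (*buildah.Builder, error) {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		return nil, err
	}
	b, err := buildah.NewBuilder(ctx, store, buildah.BuilderOptions{
		FromImage:  "fedora", // illustrative base image
		PullPolicy: buildah.PullIfMissing,
	})
	if err != nil {
		return nil, err
	}
	// Save serializes the Builder into the container's per-container
	// directory as the buildah.json state file described above.
	return b, b.Save()
}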
diff --git a/libpod/buildah/commit.go b/vendor/github.com/projectatomic/buildah/commit.go
index 537f9edbf..a5b8aaf40 100644
--- a/libpod/buildah/commit.go
+++ b/vendor/github.com/projectatomic/buildah/commit.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
+ "github.com/projectatomic/buildah/util"
"github.com/sirupsen/logrus"
)
@@ -108,7 +109,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil {
return errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
}
- err = AddImageNames(b.store, img, options.AdditionalTags)
+ err = util.AddImageNames(b.store, img, options.AdditionalTags)
if err != nil {
return errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...))
}
@@ -117,6 +118,11 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name())
}
}
+
+ img, err := is.Transport.GetStoreImage(b.store, dest)
+ if err == nil {
+ fmt.Printf("%s\n", img.ID)
+ }
return nil
}
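The added lines re-resolve the just-written image so its ID can be printed, which is what podman commit reports. For context, a hedged sketch of a caller; the CommitOptions type name is an assumption, since this hunk shows only part of the Commit signature, though the AdditionalTags field appears above.

package example

import (
	"context"

	is "github.com/containers/image/storage"
	"github.com/containers/storage"
	"github.com/projectatomic/buildah"
)

func commitTagged(ctx context.Context, store storage.Store, b *buildah.Builder) error {
	// "myimage:latest" is illustrative; ParseStoreReference resolves it
	// to a reference in local storage.
	dest, err := is.Transport.ParseStoreReference(store, "myimage:latest")
	if err != nil {
		return err
	}
	// CommitOptions and any fields beyond AdditionalTags are assumptions.
	return b.Commit(ctx, dest, buildah.CommitOptions{
		AdditionalTags: []string{"myimage:stable"},
	})
}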
diff --git a/libpod/buildah/common.go b/vendor/github.com/projectatomic/buildah/common.go
index 18c960003..18c960003 100644
--- a/libpod/buildah/common.go
+++ b/vendor/github.com/projectatomic/buildah/common.go
diff --git a/libpod/buildah/config.go b/vendor/github.com/projectatomic/buildah/config.go
index 0759ca9da..efbb133de 100644
--- a/libpod/buildah/config.go
+++ b/vendor/github.com/projectatomic/buildah/config.go
@@ -7,10 +7,10 @@ import (
"strings"
"time"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/projectatomic/libpod/cmd/podman/docker"
+ "github.com/projectatomic/buildah/docker"
)
// makeOCIv1Image builds the best OCIv1 image structure we can from the
@@ -35,6 +35,7 @@ func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
Volumes: config.Volumes,
WorkingDir: config.WorkingDir,
Labels: config.Labels,
+ StopSignal: config.StopSignal,
},
RootFS: ociv1.RootFS{
Type: "",
@@ -84,6 +85,7 @@ func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) {
Volumes: oimage.Config.Volumes,
WorkingDir: oimage.Config.WorkingDir,
Labels: oimage.Config.Labels,
+ StopSignal: oimage.Config.StopSignal,
},
},
RootFS: &docker.V2S2RootFS{
@@ -581,20 +583,21 @@ func (b *Builder) SetDefaultMountsFilePath(path string) {
}
// Comment returns the comment which will be set in the container and in
-//containers built using images buiilt from the container
+// containers built using images built from the container
func (b *Builder) Comment() string {
return b.Docker.Comment
}
// SetComment sets the Comment which will be set in the container and in
// containers built using images built from the container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
func (b *Builder) SetComment(comment string) {
b.Docker.Comment = comment
- b.OCIv1.History[0].Comment = comment
}
// StopSignal returns the signal which will be set in the container and in
-//containers built using images buiilt from the container
+// containers built using images built from the container
func (b *Builder) StopSignal() string {
return b.Docker.Config.StopSignal
}
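A short sketch of the accessors this hunk touches; SetStopSignal is assumed to exist alongside the StopSignal getter shown above.

package example

import "github.com/projectatomic/buildah"

// tweakConfig is a sketch of the accessors touched by this hunk.
func tweakConfig(b *buildah.Builder) {
	// The comment lives only in the Docker-format config; per the new
	// doc comment above, it is discarded when writing OCIv1 images.
	b.SetComment("built by example")
	// StopSignal now round-trips between the Docker and OCIv1 configs.
	if b.StopSignal() == "" {
		b.SetStopSignal("SIGTERM") // SetStopSignal is assumed to exist alongside the getter
	}
}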
diff --git a/vendor/github.com/projectatomic/buildah/delete.go b/vendor/github.com/projectatomic/buildah/delete.go
new file mode 100644
index 000000000..8de774ff9
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/delete.go
@@ -0,0 +1,18 @@
+package buildah
+
+import (
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+// Delete removes the working container. The buildah.Builder object should not
+// be used after this method is called.
+func (b *Builder) Delete() error {
+ if err := b.store.DeleteContainer(b.ContainerID); err != nil {
+ return errors.Wrapf(err, "error deleting build container")
+ }
+ b.MountPoint = ""
+ b.Container = ""
+ b.ContainerID = ""
+ return label.ReleaseLabel(b.ProcessLabel)
+}
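Delete pairs with the constructors in buildah.go for cleanup; a minimal sketch:

package example

import (
	"log"

	"github.com/projectatomic/buildah"
)

// cleanup is a sketch: after Delete the Builder must not be reused,
// and its SELinux process label is released.
func cleanup(b *buildah.Builder) {
	if err := b.Delete(); err != nil {
		log.Printf("removing working container: %v", err)
	}
}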
diff --git a/vendor/github.com/projectatomic/buildah/docker/types.go b/vendor/github.com/projectatomic/buildah/docker/types.go
new file mode 100644
index 000000000..9890eaf93
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/docker/types.go
@@ -0,0 +1,271 @@
+package docker
+
+//
+// Types extracted from Docker
+//
+
+import (
+ "time"
+
+ "github.com/containers/image/pkg/strslice"
+ "github.com/opencontainers/go-digest"
+)
+
+// github.com/moby/moby/image/rootfs.go
+const TypeLayers = "layers"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
+
+// github.com/moby/moby/image/rootfs.go
+// RootFS describes images root filesystem
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type V2S2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// History stores build commands that were used to create an image
+type V2S2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// ID is the content-addressable ID of an image.
+type ID digest.Digest
+
+// github.com/moby/moby/api/types/container/config.go
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// github.com/docker/go-connections/nat/nat.go
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// github.com/docker/go-connections/nat/nat.go
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// github.com/moby/moby/api/types/container/config.go
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also support user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variable to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// github.com/docker/distribution/manifest/schema1/config_builder.go
+// For non-top-level layers, create fake V1Compatibility strings that
+// fit the format and don't collide with anything else, but don't
+// result in runnable images on their own.
+type V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// V1Image stores the V1 image configuration.
+type V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// Image stores the image configuration
+type V2Image struct {
+ V1Image
+ Parent ID `json:"parent,omitempty"`
+ RootFS *V2S2RootFS `json:"rootfs,omitempty"`
+ History []V2S2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // rawJSON caches the immutable JSON associated with this image.
+ rawJSON []byte
+
+ // computedID is the ID computed from the hash of the image config.
+ // Not to be confused with the legacy V1 ID in V1Image.
+ computedID ID
+}
+
+// github.com/docker/distribution/manifest/versioned.go
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type V2Versioned struct {
+ // SchemaVersion is the image manifest schema that this image follows
+ SchemaVersion int `json:"schemaVersion"`
+
+ // MediaType is the media type of this schema.
+ MediaType string `json:"mediaType,omitempty"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type V2S1FSLayer struct {
+ // BlobSum is the tarsum of the referenced filesystem image layer
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// History stores unstructured v1 compatibility information
+type V2S1History struct {
+ // V1Compatibility is the raw v1 compatibility information
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type V2S1Manifest struct {
+ V2Versioned
+
+ // Name is the name of the image's repository
+ Name string `json:"name"`
+
+ // Tag is the tag of the image specified by this manifest
+ Tag string `json:"tag"`
+
+ // Architecture is the host architecture on which this image is intended to
+ // run
+ Architecture string `json:"architecture"`
+
+ // FSLayers is a list of filesystem layer blobSums contained in this image
+ FSLayers []V2S1FSLayer `json:"fsLayers"`
+
+ // History is a list of unstructured historical data for v1 compatibility
+ History []V2S1History `json:"history"`
+}
+
+// github.com/docker/distribution/blobs.go
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type V2S2Descriptor struct {
+ // MediaType describe the type of the content. All text based formats are
+ // encoded as utf-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // NOTE: Before adding a field here, please ensure that all
+ // other options have been exhausted. Much of the type relationships
+ // depend on the simplicity of this type.
+}
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+// Manifest defines a schema2 manifest.
+type V2S2Manifest struct {
+ V2Versioned
+
+ // Config references the image configuration as a blob.
+ Config V2S2Descriptor `json:"config"`
+
+ // Layers lists descriptors for the layers referenced by the
+ // configuration.
+ Layers []V2S2Descriptor `json:"layers"`
+}
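
The V2Versioned header above is the hook for two-pass decoding: sniff the schemaVersion first, then decode the full V2S1Manifest or V2S2Manifest. A minimal sketch of that pattern (the local type and sample JSON are illustrative, not part of this diff):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // versioned mirrors V2Versioned: just the fields shared by all schema versions.
    type versioned struct {
        SchemaVersion int    `json:"schemaVersion"`
        MediaType     string `json:"mediaType,omitempty"`
    }

    func main() {
        raw := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
        var v versioned
        if err := json.Unmarshal(raw, &v); err != nil {
            panic(err)
        }
        switch v.SchemaVersion {
        case 1:
            fmt.Println("decode the rest as a V2S1Manifest")
        case 2:
            fmt.Println("decode the rest as a V2S2Manifest")
        default:
            fmt.Printf("unsupported schema version %d\n", v.SchemaVersion)
        }
    }
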
diff --git a/libpod/buildah/image.go b/vendor/github.com/projectatomic/buildah/image.go
index 7232d53ad..e5a49f1f9 100644
--- a/libpod/buildah/image.go
+++ b/vendor/github.com/projectatomic/buildah/image.go
@@ -21,7 +21,7 @@ import (
specs "github.com/opencontainers/image-spec/specs-go"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/projectatomic/libpod/cmd/podman/docker"
+ "github.com/projectatomic/buildah/docker"
"github.com/sirupsen/logrus"
)
@@ -450,7 +450,7 @@ func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *
return i.manifest, i.manifestType, nil
}
-func (i *containerImageSource) LayerInfosForCopy(context.Context) ([]types.BlobInfo, error) {
+func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
return nil, nil
}
diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/build.go b/vendor/github.com/projectatomic/buildah/imagebuildah/build.go
new file mode 100644
index 000000000..c477e0996
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/imagebuildah/build.go
@@ -0,0 +1,775 @@
+package imagebuildah
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/transports/alltransports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/stringid"
+ "github.com/docker/docker/builder/dockerfile/parser"
+ docker "github.com/fsouza/go-dockerclient"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/openshift/imagebuilder"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ PullIfMissing = buildah.PullIfMissing
+ PullAlways = buildah.PullAlways
+ PullNever = buildah.PullNever
+ DefaultRuntime = buildah.DefaultRuntime
+ OCIv1ImageFormat = buildah.OCIv1ImageManifest
+ Dockerv2ImageFormat = buildah.Dockerv2ImageManifest
+
+ Gzip = archive.Gzip
+ Bzip2 = archive.Bzip2
+ Xz = archive.Xz
+ Uncompressed = archive.Uncompressed
+)
+
+// Mount is a mountpoint for the build container.
+type Mount specs.Mount
+
+// BuildOptions can be used to alter how an image is built.
+type BuildOptions struct {
+ // ContextDirectory is the default source location for COPY and ADD
+ // commands.
+ ContextDirectory string
+ // PullPolicy controls whether or not we pull images. It should be one
+ // of PullIfMissing, PullAlways, or PullNever.
+ PullPolicy int
+ // Registry is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone can not be resolved to a
+ // reference to a source image. No separator is implicitly added.
+ Registry string
+ // Transport is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone, or the image name and
+ // the registry together, can not be resolved to a reference to a
+ // source image. No separator is implicitly added.
+ Transport string
+ // IgnoreUnrecognizedInstructions tells us to just log instructions we
+ // don't recognize, and try to keep going.
+ IgnoreUnrecognizedInstructions bool
+ // Quiet tells us whether or not to announce steps as we go through them.
+ Quiet bool
+ // Runtime is the name of the command to run for RUN instructions. It
+ // should accept the same arguments and flags that runc does.
+ Runtime string
+ // RuntimeArgs adds global arguments for the runtime.
+ RuntimeArgs []string
+ // TransientMounts is a list of mounts that won't be kept in the image.
+ TransientMounts []Mount
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ Compression archive.Compression
+ // Arguments which can be interpolated into Dockerfiles
+ Args map[string]string
+ // Name of the image to write to.
+ Output string
+ // Additional tags to add to the image that we write, if we know of a
+ // way to add them.
+ AdditionalTags []string
+ // Log is a callback that will print a progress message. If no value
+ // is supplied, the message will be sent to Err (or os.Stderr, if Err
+ // is nil) by default.
+ Log func(format string, args ...interface{})
+ // Out is a place where non-error log messages are sent.
+ Out io.Writer
+ // Err is a place where error log messages should be sent.
+ Err io.Writer
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to report the
+ // progress of the (possible) pulling of the source image and the
+ // writing of the new image.
+ ReportWriter io.Writer
+ // OutputFormat is the format of the output image's manifest and
+ // configuration data.
+ // Accepted values are OCIv1ImageFormat and Dockerv2ImageFormat.
+ OutputFormat string
+ // SystemContext holds parameters used for authentication.
+ SystemContext *types.SystemContext
+ CommonBuildOpts *buildah.CommonBuildOptions
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+ DefaultMountsFilePath string
+}
+
+// Executor is a buildah-based implementation of the imagebuilder.Executor
+// interface.
+type Executor struct {
+ index int
+ name string
+ named map[string]*Executor
+ store storage.Store
+ contextDir string
+ builder *buildah.Builder
+ pullPolicy int
+ registry string
+ transport string
+ ignoreUnrecognizedInstructions bool
+ quiet bool
+ runtime string
+ runtimeArgs []string
+ transientMounts []Mount
+ compression archive.Compression
+ output string
+ outputFormat string
+ additionalTags []string
+ log func(format string, args ...interface{})
+ out io.Writer
+ err io.Writer
+ signaturePolicyPath string
+ systemContext *types.SystemContext
+ mountPoint string
+ preserved int
+ volumes imagebuilder.VolumeSet
+ volumeCache map[string]string
+ volumeCacheInfo map[string]os.FileInfo
+ reportWriter io.Writer
+ commonBuildOptions *buildah.CommonBuildOptions
+ defaultMountsFilePath string
+}
+
+// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
+func (b *Executor) withName(name string, index int) *Executor {
+ if b.named == nil {
+ b.named = make(map[string]*Executor)
+ }
+ copied := *b
+ copied.index = index
+ copied.name = name
+ child := &copied
+ b.named[name] = child
+ if idx := strconv.Itoa(index); idx != name {
+ b.named[idx] = child
+ }
+ return child
+}
+
+// Preserve informs the executor that from this point on, it needs to ensure
+// that only COPY and ADD instructions can modify the contents of this
+// directory or anything below it.
+// The Executor handles this by caching the contents of directories which have
+// been marked this way before executing a RUN instruction, invalidating that
+// cache when an ADD or COPY instruction sets any location under the directory
+// as the destination, and using the cache to reset the contents of the
+// directory tree after processing each RUN instruction.
+// It would be simpler if we could just mark the directory as a read-only bind
+ // mount of itself during Run(), but the directory is expected to remain
+// writeable, even if any changes within it are ultimately discarded.
+func (b *Executor) Preserve(path string) error {
+ logrus.Debugf("PRESERVE %q", path)
+ if b.volumes.Covers(path) {
+ // This path is already a subdirectory of a volume path that
+ // we're already preserving, so there's nothing new to be done
+ // except ensure that it exists.
+ archivedPath := filepath.Join(b.mountPoint, path)
+ if err := os.MkdirAll(archivedPath, 0755); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ }
+ if err := b.volumeCacheInvalidate(path); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
+ }
+ return nil
+ }
+ // Figure out where the cache for this volume would be stored.
+ b.preserved++
+ cacheDir, err := b.store.ContainerDirectory(b.builder.ContainerID)
+ if err != nil {
+ return errors.Errorf("unable to locate temporary directory for container")
+ }
+ cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", b.preserved))
+ // Save info about the top level of the location that we'll be archiving.
+ archivedPath := filepath.Join(b.mountPoint, path)
+
+ // Try to resolve the symlink (if one exists)
+ // Set archivedPath and path based on whether a symlink is found or not
+ if symLink, err := resolveSymLink(b.mountPoint, path); err == nil {
+ archivedPath = filepath.Join(b.mountPoint, symLink)
+ path = symLink
+ } else {
+ return errors.Wrapf(err, "error reading symbolic link to %q", path)
+ }
+
+ st, err := os.Stat(archivedPath)
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(archivedPath, 0755); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ }
+ st, err = os.Stat(archivedPath)
+ }
+ if err != nil {
+ logrus.Debugf("error reading info about %q: %v", archivedPath, err)
+ return errors.Wrapf(err, "error reading info about volume path %q", archivedPath)
+ }
+ b.volumeCacheInfo[path] = st
+ if !b.volumes.Add(path) {
+ // This path is not a subdirectory of a volume path that we're
+ // already preserving, so adding it to the list should work.
+ return errors.Errorf("error adding %q to the volume cache", path)
+ }
+ b.volumeCache[path] = cacheFile
+ // Now prune cache files for volumes that are now supplanted by this one.
+ removed := []string{}
+ for cachedPath := range b.volumeCache {
+ // Walk our list of cached volumes, and check that they're
+ // still in the list of locations that we need to cache.
+ found := false
+ for _, volume := range b.volumes {
+ if volume == cachedPath {
+ // We need to keep this volume's cache.
+ found = true
+ break
+ }
+ }
+ if !found {
+ // We don't need to keep this volume's cache. Make a
+ // note to remove it.
+ removed = append(removed, cachedPath)
+ }
+ }
+ // Actually remove the caches that we decided to remove.
+ for _, cachedPath := range removed {
+ archivedPath := filepath.Join(b.mountPoint, cachedPath)
+ logrus.Debugf("no longer need cache of %q in %q", archivedPath, b.volumeCache[cachedPath])
+ if err := os.Remove(b.volumeCache[cachedPath]); err != nil {
+ return errors.Wrapf(err, "error removing %q", b.volumeCache[cachedPath])
+ }
+ delete(b.volumeCache, cachedPath)
+ }
+ return nil
+}
+
+// Remove any volume cache item which will need to be re-saved because we're
+// writing to part of it.
+func (b *Executor) volumeCacheInvalidate(path string) error {
+ invalidated := []string{}
+ for cachedPath := range b.volumeCache {
+ if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
+ invalidated = append(invalidated, cachedPath)
+ }
+ }
+ for _, cachedPath := range invalidated {
+ if err := os.Remove(b.volumeCache[cachedPath]); err != nil {
+ return errors.Wrapf(err, "error removing volume cache %q", b.volumeCache[cachedPath])
+ }
+ archivedPath := filepath.Join(b.mountPoint, cachedPath)
+ logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, b.volumeCache[cachedPath])
+ delete(b.volumeCache, cachedPath)
+ }
+ return nil
+}
+
+// Save the contents of each of the executor's list of volumes for which we
+// don't already have a cache file.
+func (b *Executor) volumeCacheSave() error {
+ for cachedPath, cacheFile := range b.volumeCache {
+ archivedPath := filepath.Join(b.mountPoint, cachedPath)
+ _, err := os.Stat(cacheFile)
+ if err == nil {
+ logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
+ continue
+ }
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile)
+ }
+ if err := os.MkdirAll(archivedPath, 0755); err != nil {
+ return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ }
+ logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
+ cache, err := os.Create(cacheFile)
+ if err != nil {
+ return errors.Wrapf(err, "error creating archive at %q", cacheFile)
+ }
+ defer cache.Close()
+ rc, err := archive.Tar(archivedPath, archive.Uncompressed)
+ if err != nil {
+ return errors.Wrapf(err, "error archiving %q", archivedPath)
+ }
+ defer rc.Close()
+ _, err = io.Copy(cache, rc)
+ if err != nil {
+ return errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
+ }
+ }
+ return nil
+}
+
+// Restore the contents of each of the executor's list of volumes.
+func (b *Executor) volumeCacheRestore() error {
+ for cachedPath, cacheFile := range b.volumeCache {
+ archivedPath := filepath.Join(b.mountPoint, cachedPath)
+ logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
+ cache, err := os.Open(cacheFile)
+ if err != nil {
+ return errors.Wrapf(err, "error opening archive at %q", cacheFile)
+ }
+ defer cache.Close()
+ if err := os.RemoveAll(archivedPath); err != nil {
+ return errors.Wrapf(err, "error clearing volume path %q", archivedPath)
+ }
+ if err := os.MkdirAll(archivedPath, 0755); err != nil {
+ return errors.Wrapf(err, "error recreating volume path %q", archivedPath)
+ }
+ err = archive.Untar(cache, archivedPath, nil)
+ if err != nil {
+ return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
+ }
+ if st, ok := b.volumeCacheInfo[cachedPath]; ok {
+ if err := os.Chmod(archivedPath, st.Mode()); err != nil {
+ return errors.Wrapf(err, "error restoring permissions on %q", archivedPath)
+ }
+ if err := os.Chown(archivedPath, 0, 0); err != nil {
+ return errors.Wrapf(err, "error setting ownership on %q", archivedPath)
+ }
+ if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
+ return errors.Wrapf(err, "error restoring datestamps on %q", archivedPath)
+ }
+ }
+ }
+ return nil
+}
+
+// Copy copies data into the working tree. The "Download" field is how
+// imagebuilder tells us the instruction was "ADD" and not "COPY".
+func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+ for _, copy := range copies {
+ logrus.Debugf("COPY %#v, %#v", excludes, copy)
+ if err := b.volumeCacheInvalidate(copy.Dest); err != nil {
+ return err
+ }
+ sources := []string{}
+ for _, src := range copy.Src {
+ if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
+ sources = append(sources, src)
+ } else if len(copy.From) > 0 {
+ if other, ok := b.named[copy.From]; ok && other.index < b.index {
+ sources = append(sources, filepath.Join(other.mountPoint, src))
+ } else {
+ return errors.Errorf("the stage %q has not been built", copy.From)
+ }
+ } else {
+ sources = append(sources, filepath.Join(b.contextDir, src))
+ }
+ }
+ if err := b.builder.Add(copy.Dest, copy.Download, buildah.AddAndCopyOptions{}, sources...); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func convertMounts(mounts []Mount) []specs.Mount {
+ specmounts := []specs.Mount{}
+ for _, m := range mounts {
+ s := specs.Mount{
+ Destination: m.Destination,
+ Type: m.Type,
+ Source: m.Source,
+ Options: m.Options,
+ }
+ specmounts = append(specmounts, s)
+ }
+ return specmounts
+}
+
+// Run executes a RUN instruction using the working container as a root
+// directory.
+func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
+ logrus.Debugf("RUN %#v, %#v", run, config)
+ if b.builder == nil {
+ return errors.Errorf("no build container available")
+ }
+ options := buildah.RunOptions{
+ Hostname: config.Hostname,
+ Runtime: b.runtime,
+ Args: b.runtimeArgs,
+ Mounts: convertMounts(b.transientMounts),
+ Env: config.Env,
+ User: config.User,
+ WorkingDir: config.WorkingDir,
+ Entrypoint: config.Entrypoint,
+ Cmd: config.Cmd,
+ NetworkDisabled: config.NetworkDisabled,
+ Quiet: b.quiet,
+ }
+
+ args := run.Args
+ if run.Shell {
+ args = append([]string{"/bin/sh", "-c"}, args...)
+ }
+ if err := b.volumeCacheSave(); err != nil {
+ return err
+ }
+ err := b.builder.Run(args, options)
+ if err2 := b.volumeCacheRestore(); err2 != nil {
+ if err == nil {
+ return err2
+ }
+ }
+ return err
+}
+
+// UnrecognizedInstruction is called when we encounter an instruction that the
+// imagebuilder parser didn't understand.
+func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+ errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command)
+ errMsg := fmt.Sprintf(errStr+"%#v", step)
+ if b.ignoreUnrecognizedInstructions {
+ logrus.Debugf("%s", errMsg)
+ return nil
+ }
+
+ switch logrus.GetLevel() {
+ case logrus.ErrorLevel:
+ logrus.Errorf("%s", errStr)
+ case logrus.DebugLevel:
+ logrus.Debugf("%s", errMsg)
+ default:
+ logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
+ }
+
+ return errors.Errorf("%s", errMsg)
+}
+
+// NewExecutor creates a new instance of the imagebuilder.Executor interface.
+func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
+ exec := Executor{
+ store: store,
+ contextDir: options.ContextDirectory,
+ pullPolicy: options.PullPolicy,
+ registry: options.Registry,
+ transport: options.Transport,
+ ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
+ quiet: options.Quiet,
+ runtime: options.Runtime,
+ runtimeArgs: options.RuntimeArgs,
+ transientMounts: options.TransientMounts,
+ compression: options.Compression,
+ output: options.Output,
+ outputFormat: options.OutputFormat,
+ additionalTags: options.AdditionalTags,
+ signaturePolicyPath: options.SignaturePolicyPath,
+ systemContext: options.SystemContext,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ log: options.Log,
+ out: options.Out,
+ err: options.Err,
+ reportWriter: options.ReportWriter,
+ commonBuildOptions: options.CommonBuildOpts,
+ defaultMountsFilePath: options.DefaultMountsFilePath,
+ }
+ if exec.err == nil {
+ exec.err = os.Stderr
+ }
+ if exec.out == nil {
+ exec.out = os.Stdout
+ }
+ if exec.log == nil {
+ stepCounter := 0
+ exec.log = func(format string, args ...interface{}) {
+ stepCounter++
+ prefix := fmt.Sprintf("STEP %d: ", stepCounter)
+ suffix := "\n"
+ fmt.Fprintf(exec.err, prefix+format+suffix, args...)
+ }
+ }
+ return &exec, nil
+}
+
+// Prepare creates a working container based on specified image, or if one
+// isn't specified, the first FROM instruction we can find in the parsed tree.
+func (b *Executor) Prepare(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node, from string) error {
+ if from == "" {
+ base, err := ib.From(node)
+ if err != nil {
+ logrus.Debugf("Prepare(node.Children=%#v)", node.Children)
+ return errors.Wrapf(err, "error determining starting point for build")
+ }
+ from = base
+ }
+ logrus.Debugf("FROM %#v", from)
+ if !b.quiet {
+ b.log("FROM %s", from)
+ }
+ builderOptions := buildah.BuilderOptions{
+ FromImage: from,
+ PullPolicy: b.pullPolicy,
+ Registry: b.registry,
+ Transport: b.transport,
+ SignaturePolicyPath: b.signaturePolicyPath,
+ ReportWriter: b.reportWriter,
+ SystemContext: b.systemContext,
+ CommonBuildOpts: b.commonBuildOptions,
+ DefaultMountsFilePath: b.defaultMountsFilePath,
+ }
+ builder, err := buildah.NewBuilder(ctx, b.store, builderOptions)
+ if err != nil {
+ return errors.Wrapf(err, "error creating build container")
+ }
+ volumes := map[string]struct{}{}
+ for _, v := range builder.Volumes() {
+ volumes[v] = struct{}{}
+ }
+ dConfig := docker.Config{
+ Hostname: builder.Hostname(),
+ Domainname: builder.Domainname(),
+ User: builder.User(),
+ Env: builder.Env(),
+ Cmd: builder.Cmd(),
+ Image: from,
+ Volumes: volumes,
+ WorkingDir: builder.WorkDir(),
+ Entrypoint: builder.Entrypoint(),
+ Labels: builder.Labels(),
+ Shell: builder.Shell(),
+ StopSignal: builder.StopSignal(),
+ }
+ var rootfs *docker.RootFS
+ if builder.Docker.RootFS != nil {
+ rootfs = &docker.RootFS{
+ Type: builder.Docker.RootFS.Type,
+ }
+ for _, id := range builder.Docker.RootFS.DiffIDs {
+ rootfs.Layers = append(rootfs.Layers, id.String())
+ }
+ }
+ dImage := docker.Image{
+ Parent: builder.FromImage,
+ ContainerConfig: dConfig,
+ Container: builder.Container,
+ Author: builder.Maintainer(),
+ Architecture: builder.Architecture(),
+ RootFS: rootfs,
+ }
+ dImage.Config = &dImage.ContainerConfig
+ err = ib.FromImage(&dImage, node)
+ if err != nil {
+ if err2 := builder.Delete(); err2 != nil {
+ logrus.Debugf("error deleting container which we failed to update: %v", err2)
+ }
+ return errors.Wrapf(err, "error updating build context")
+ }
+ mountPoint, err := builder.Mount(builder.MountLabel)
+ if err != nil {
+ if err2 := builder.Delete(); err2 != nil {
+ logrus.Debugf("error deleting container which we failed to mount: %v", err2)
+ }
+ return errors.Wrapf(err, "error mounting new container")
+ }
+ b.mountPoint = mountPoint
+ b.builder = builder
+ return nil
+}
+
+// Delete deletes the working container, if we have one. The Executor object
+// should not be used to build another image, as the name of the output image
+// isn't resettable.
+func (b *Executor) Delete() (err error) {
+ if b.builder != nil {
+ err = b.builder.Delete()
+ b.builder = nil
+ }
+ return err
+}
+
+// Execute runs each of the steps in the parsed tree, in turn.
+func (b *Executor) Execute(ib *imagebuilder.Builder, node *parser.Node) error {
+ for i, node := range node.Children {
+ step := ib.Step()
+ if err := step.Resolve(node); err != nil {
+ return errors.Wrapf(err, "error resolving step %+v", *node)
+ }
+ logrus.Debugf("Parsed Step: %+v", *step)
+ if !b.quiet {
+ b.log("%s", step.Original)
+ }
+ requiresStart := false
+ if i < len(node.Children)-1 {
+ requiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]})
+ }
+ err := ib.Run(step, b, requiresStart)
+ if err != nil {
+ return errors.Wrapf(err, "error building at step %+v", *step)
+ }
+ }
+ return nil
+}
+
+// Commit writes the container's contents to an image, using a passed-in tag as
+// the name if there is one, generating a unique ID-based one otherwise.
+func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err error) {
+ var imageRef types.ImageReference
+ if b.output != "" {
+ imageRef, err = alltransports.ParseImageName(b.output)
+ if err != nil {
+ imageRef2, err2 := is.Transport.ParseStoreReference(b.store, b.output)
+ if err2 == nil {
+ imageRef = imageRef2
+ err = nil
+ } else {
+ err = err2
+ }
+ }
+ } else {
+ imageRef, err = is.Transport.ParseStoreReference(b.store, "@"+stringid.GenerateRandomID())
+ }
+ if err != nil {
+ return errors.Wrapf(err, "error parsing reference for image to be written")
+ }
+ if ib.Author != "" {
+ b.builder.SetMaintainer(ib.Author)
+ }
+ config := ib.Config()
+ b.builder.SetHostname(config.Hostname)
+ b.builder.SetDomainname(config.Domainname)
+ b.builder.SetUser(config.User)
+ b.builder.ClearPorts()
+ for p := range config.ExposedPorts {
+ b.builder.SetPort(string(p))
+ }
+ b.builder.ClearEnv()
+ for _, envSpec := range config.Env {
+ spec := strings.SplitN(envSpec, "=", 2)
+ b.builder.SetEnv(spec[0], spec[1])
+ }
+ b.builder.SetCmd(config.Cmd)
+ b.builder.ClearVolumes()
+ for v := range config.Volumes {
+ b.builder.AddVolume(v)
+ }
+ b.builder.SetWorkDir(config.WorkingDir)
+ b.builder.SetEntrypoint(config.Entrypoint)
+ b.builder.SetShell(config.Shell)
+ b.builder.SetStopSignal(config.StopSignal)
+ b.builder.ClearLabels()
+ for k, v := range config.Labels {
+ b.builder.SetLabel(k, v)
+ }
+ if imageRef != nil {
+ logName := transports.ImageName(imageRef)
+ logrus.Debugf("COMMIT %q", logName)
+ if !b.quiet {
+ b.log("COMMIT %s", logName)
+ }
+ } else {
+ logrus.Debugf("COMMIT")
+ if !b.quiet {
+ b.log("COMMIT")
+ }
+ }
+ options := buildah.CommitOptions{
+ Compression: b.compression,
+ SignaturePolicyPath: b.signaturePolicyPath,
+ AdditionalTags: b.additionalTags,
+ ReportWriter: b.reportWriter,
+ PreferredManifestType: b.outputFormat,
+ }
+ return b.builder.Commit(ctx, imageRef, options)
+}
+
+// Build takes care of the details of running Prepare/Execute/Commit/Delete
+// over each of the one or more parsed Dockerfiles and stages.
+func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error {
+ if len(stages) == 0 {
+ errors.New("error building: no stages to build")
+ }
+ var stageExecutor *Executor
+ for _, stage := range stages {
+ stageExecutor = b.withName(stage.Name, stage.Position)
+ if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
+ return err
+ }
+ defer stageExecutor.Delete()
+ if err := stageExecutor.Execute(stage.Builder, stage.Node); err != nil {
+ return err
+ }
+ }
+ return stageExecutor.Commit(ctx, stages[len(stages)-1].Builder)
+}
+
+// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
+// URLs), creates a new Executor, and then runs Prepare/Execute/Commit/Delete
+// over the entire set of instructions.
+func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOptions, paths ...string) error {
+ if len(paths) == 0 {
+ return errors.Errorf("error building: no dockerfiles specified")
+ }
+ var dockerfiles []io.ReadCloser
+ defer func(dockerfiles ...io.ReadCloser) {
+ for _, d := range dockerfiles {
+ d.Close()
+ }
+ }(dockerfiles...)
+ for _, dfile := range paths {
+ if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
+ logrus.Debugf("reading remote Dockerfile %q", dfile)
+ resp, err := http.Get(dfile)
+ if err != nil {
+ return errors.Wrapf(err, "error getting %q", dfile)
+ }
+ if resp.ContentLength == 0 {
+ resp.Body.Close()
+ return errors.Errorf("no contents in %q", dfile)
+ }
+ dockerfiles = append(dockerfiles, resp.Body)
+ } else {
+ if !filepath.IsAbs(dfile) {
+ logrus.Debugf("resolving local Dockerfile %q", dfile)
+ dfile = filepath.Join(options.ContextDirectory, dfile)
+ }
+ logrus.Debugf("reading local Dockerfile %q", dfile)
+ contents, err := os.Open(dfile)
+ if err != nil {
+ return errors.Wrapf(err, "error reading %q", dfile)
+ }
+ dinfo, err := contents.Stat()
+ if err != nil {
+ contents.Close()
+ return errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ if dinfo.Size() == 0 {
+ contents.Close()
+ return errors.Wrapf(err, "no contents in %q", dfile)
+ }
+ dockerfiles = append(dockerfiles, contents)
+ }
+ }
+ mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
+ if err != nil {
+ return errors.Wrapf(err, "error parsing main Dockerfile")
+ }
+ for _, d := range dockerfiles[1:] {
+ additionalNode, err := imagebuilder.ParseDockerfile(d)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing additional Dockerfile")
+ }
+ mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+ }
+ exec, err := NewExecutor(store, options)
+ if err != nil {
+ return errors.Wrapf(err, "error creating build executor")
+ }
+ b := imagebuilder.NewBuilder(options.Args)
+ stages := imagebuilder.NewStages(mainNode, b)
+ return exec.Build(ctx, stages)
+}
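
BuildDockerfiles is the entry point that podman's build command drives. A minimal sketch of calling it directly, assuming containers/storage's GetStore with its DefaultStoreOptions and a Dockerfile in the current directory; names and the output tag are illustrative:

    package main

    import (
        "context"
        "os"

        "github.com/containers/storage"
        "github.com/projectatomic/buildah"
        "github.com/projectatomic/buildah/imagebuildah"
    )

    func main() {
        // Hand control to a reexec'd child first, if we are one.
        if imagebuildah.InitReexec() {
            return
        }
        store, err := storage.GetStore(storage.DefaultStoreOptions)
        if err != nil {
            panic(err)
        }
        options := imagebuildah.BuildOptions{
            ContextDirectory: ".",
            PullPolicy:       imagebuildah.PullIfMissing,
            Output:           "localhost/example:latest",
            Out:              os.Stdout,
            Err:              os.Stderr,
            // newBuilder reads CommonBuildOpts.LabelOpts, so this must be non-nil.
            CommonBuildOpts: &buildah.CommonBuildOptions{},
        }
        // Parse and run ./Dockerfile against the context directory.
        if err := imagebuildah.BuildDockerfiles(context.Background(), store, options, "Dockerfile"); err != nil {
            panic(err)
        }
    }
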
diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go
new file mode 100644
index 000000000..b2452b61c
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go
@@ -0,0 +1,145 @@
+package imagebuildah
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/reexec"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ symlinkChrootedCommand = "chrootsymlinks-resolve"
+ maxSymlinksResolved = 40
+)
+
+func init() {
+ reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+}
+
+func resolveChrootedSymlinks() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+ os.Exit(1)
+ }
+
+ // Our second parameter is the path name to evaluate for symbolic links
+ symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.WriteString(symLink); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+ os.Exit(1)
+ }
+ os.Exit(status)
+}
+
+func resolveSymLink(rootdir, filename string) (string, error) {
+ // The child process expects a chroot and one path that
+ // will be consulted relative to the chroot directory and evaluated
+ // for any symbolic links present.
+ cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", errors.Wrapf(err, string(output))
+ }
+
+ // Hand back the resolved symlink; it will be "" if a symlink was not found
+ return string(output), nil
+}
+
+// getSymbolicLink walks each part of the path, resolving symlinks as they appear.
+// It returns the fully resolved target of "path".
+func getSymbolicLink(rootdir, path string) (string, error) {
+ var (
+ symPath string
+ symLinksResolved int
+ )
+
+ // Split the path so we can resolve it one part at a time
+ splitPath := strings.Split(path, "/")
+ if splitPath[0] == "" {
+ splitPath = splitPath[1:]
+ symPath = "/"
+ }
+
+ for _, p := range splitPath {
+ // If we have resolved 40 symlinks, something is terribly wrong;
+ // return an error and exit
+ if symLinksResolved >= maxSymlinksResolved {
+ return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
+ }
+
+ symPath = filepath.Join(symPath, p)
+ isSymlink, resolvedPath, err := hasSymlink(symPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "error checking symlink for %q", symPath)
+ }
+ // if isSymlink is true, check if resolvedPath is potentially another symlink
+ // keep doing this till resolvedPath is not a symlink and isSymlink is false
+ for isSymlink {
+ // Need to keep track of the number of symlinks resolved.
+ // This will also return an error if the symlink points to itself, as that will exceed maxSymlinksResolved
+ if symLinksResolved >= maxSymlinksResolved {
+ return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
+ }
+ isSymlink, resolvedPath, err = hasSymlink(resolvedPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath)
+ }
+ symLinksResolved++
+ }
+ // Assign resolvedPath to symPath. The next part of the loop will append the next part of the original path
+ // and continue resolving
+ symPath = resolvedPath
+ symLinksResolved++
+ }
+ return symPath, nil
+}
+
+// hasSymlink returns true and the target if path is a symlink;
+// otherwise it returns false and the original path
+func hasSymlink(path string) (bool, string, error) {
+ info, err := os.Lstat(path)
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ }
+ info, err = os.Lstat(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ }
+ }
+ // Return false and path as path is not a symlink
+ if info.Mode()&os.ModeSymlink != os.ModeSymlink {
+ return false, path, nil
+ }
+
+ // Read the symlink to get what it points to
+ targetDir, err := os.Readlink(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error reading link %q", path)
+ }
+ // if the symlink points to a relative path, prepend the path till now to the resolved path
+ if !filepath.IsAbs(targetDir) {
+ targetDir = filepath.Join(path, targetDir)
+ }
+ // run filepath.Clean to remove the ".." from relative paths
+ return true, filepath.Clean(targetDir), nil
+}
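
resolveSymLink relies on the containers/storage reexec package: the binary re-executes itself under a registered command name, so the chroot happens in a throwaway child process instead of the caller. A minimal sketch of that pattern in isolation (the command name and handler are illustrative):

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/reexec"
    )

    func init() {
        // Register a handler under a command name; when this binary is
        // re-executed with that name as os.Args[0], the handler runs
        // instead of main().
        reexec.Register("hello-child", func() {
            fmt.Println("running in the re-executed child")
            os.Exit(0)
        })
    }

    func main() {
        // reexec.Init dispatches to a registered handler if os.Args[0]
        // matches one; it must run before any other work in main().
        if reexec.Init() {
            return
        }
        cmd := reexec.Command("hello-child")
        out, err := cmd.CombinedOutput()
        if err != nil {
            panic(err)
        }
        fmt.Printf("child said: %s", out)
    }
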
diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/util.go b/vendor/github.com/projectatomic/buildah/imagebuildah/util.go
new file mode 100644
index 000000000..805cfce44
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/imagebuildah/util.go
@@ -0,0 +1,96 @@
+package imagebuildah
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah"
+ "github.com/sirupsen/logrus"
+)
+
+func cloneToDirectory(url, dir string) error {
+ if !strings.HasPrefix(url, "git://") {
+ url = "git://" + url
+ }
+ logrus.Debugf("cloning %q to %q", url, dir)
+ cmd := exec.Command("git", "clone", url, dir)
+ return cmd.Run()
+}
+
+func downloadToDirectory(url, dir string) error {
+ logrus.Debugf("extracting %q to %q", url, dir)
+ resp, err := http.Get(url)
+ if err != nil {
+ return errors.Wrapf(err, "error getting %q", url)
+ }
+ defer resp.Body.Close()
+ if resp.ContentLength == 0 {
+ return errors.Errorf("no contents in %q", url)
+ }
+ return chrootarchive.Untar(resp.Body, dir, nil)
+}
+
+// TempDirForURL checks if the passed-in string looks like a URL. If it is,
+// TempDirForURL creates a temporary directory, arranges for its contents to be
+// the contents of that URL, and returns the temporary directory's path, along
+// with the name of a subdirectory which should be used as the build context
+// (which may be empty or "."). Removal of the temporary directory is the
+// responsibility of the caller. If the string doesn't look like a URL,
+// TempDirForURL returns empty strings and a nil error.
+func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {
+ if !strings.HasPrefix(url, "http://") &&
+ !strings.HasPrefix(url, "https://") &&
+ !strings.HasPrefix(url, "git://") &&
+ !strings.HasPrefix(url, "github.com/") {
+ return "", "", nil
+ }
+ name, err = ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
+ }
+ if strings.HasPrefix(url, "git://") {
+ err = cloneToDirectory(url, name)
+ if err != nil {
+ if err2 := os.Remove(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", "", err
+ }
+ return name, "", nil
+ }
+ if strings.HasPrefix(url, "github.com/") {
+ ghurl := url
+ url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl)
+ logrus.Debugf("resolving url %q to %q", ghurl, url)
+ subdir = path.Base(ghurl) + "-master"
+ }
+ if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
+ err = downloadToDirectory(url, name)
+ if err != nil {
+ if err2 := os.Remove(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", subdir, err
+ }
+ return name, subdir, nil
+ }
+ logrus.Debugf("don't know how to retrieve %q", url)
+ if err2 := os.Remove(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", "", errors.Errorf("unreachable code reached")
+}
+
+// InitReexec is a wrapper for buildah.InitReexec(). It should be called at
+// the start of main(), and if it returns true, main() should return
+// immediately.
+func InitReexec() bool {
+ return buildah.InitReexec()
+}
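
A short usage sketch for TempDirForURL; the GitHub URL is illustrative, and removing the returned directory is the caller's job, as the doc comment says:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "github.com/projectatomic/buildah/imagebuildah"
    )

    func main() {
        // Ask for a temporary build context populated from a GitHub repository.
        name, subdir, err := imagebuildah.TempDirForURL("", "build-ctx-", "github.com/projectatomic/buildah")
        if err != nil {
            panic(err)
        }
        if name == "" {
            fmt.Println("not a URL; treat the argument as a local path instead")
            return
        }
        defer os.RemoveAll(name) // caller owns cleanup
        fmt.Println("build context:", filepath.Join(name, subdir))
    }
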
diff --git a/vendor/github.com/projectatomic/buildah/import.go b/vendor/github.com/projectatomic/buildah/import.go
new file mode 100644
index 000000000..b98219107
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/import.go
@@ -0,0 +1,123 @@
+package buildah
+
+import (
+ "context"
+
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah/docker"
+ "github.com/projectatomic/buildah/util"
+)
+
+func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
+ manifest := []byte{}
+ config := []byte{}
+ imageName := ""
+
+ if imageID != "" {
+ ref, err := is.Transport.ParseStoreReference(store, imageID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "no such image %q", imageID)
+ }
+ src, err2 := ref.NewImage(ctx, systemContext)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, "error instantiating image")
+ }
+ defer src.Close()
+ config, err = src.ConfigBlob(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image configuration")
+ }
+ manifest, _, err = src.Manifest(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading image manifest")
+ }
+ if img, err3 := store.Image(imageID); err3 == nil {
+ if len(img.Names) > 0 {
+ imageName = img.Names[0]
+ }
+ }
+ }
+
+ builder := &Builder{
+ store: store,
+ Type: containerType,
+ FromImage: imageName,
+ FromImageID: imageID,
+ Config: config,
+ Manifest: manifest,
+ Container: containerName,
+ ContainerID: containerID,
+ ImageAnnotations: map[string]string{},
+ ImageCreatedBy: "",
+ }
+
+ builder.initConfig()
+
+ return builder, nil
+}
+
+func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
+ if options.Container == "" {
+ return nil, errors.Errorf("container name must be specified")
+ }
+
+ c, err := store.Container(options.Container)
+ if err != nil {
+ return nil, err
+ }
+
+ systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
+
+ builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ if builder.FromImageID != "" {
+ if d, err2 := digest.Parse(builder.FromImageID); err2 == nil {
+ builder.Docker.Parent = docker.ID(d)
+ } else {
+ builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID))
+ }
+ }
+ if builder.FromImage != "" {
+ builder.Docker.ContainerConfig.Image = builder.FromImage
+ }
+
+ err = builder.Save()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error saving builder state")
+ }
+
+ return builder, nil
+}
+
+func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
+ var img *storage.Image
+ var err error
+
+ if options.Image == "" {
+ return nil, errors.Errorf("image name must be specified")
+ }
+
+ systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+
+ for _, image := range util.ResolveName(options.Image, "", systemContext, store) {
+ img, err = util.FindImage(store, image)
+ if err != nil {
+ continue
+ }
+
+ builder, err2 := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, "error importing build settings from image %q", options.Image)
+ }
+
+ return builder, nil
+ }
+ return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
+}
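
These unexported helpers back the buildah package's import entry points. A minimal usage sketch, assuming the exported ImportBuilder wrapper (defined elsewhere in the vendored package) and an existing containers/storage container; the container name is illustrative:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/storage"
        "github.com/projectatomic/buildah"
    )

    func main() {
        store, err := storage.GetStore(storage.DefaultStoreOptions)
        if err != nil {
            panic(err)
        }
        // Recover builder state from an existing container rather than
        // creating a new one from an image.
        builder, err := buildah.ImportBuilder(context.Background(), store, buildah.ImportOptions{
            Container: "my-container", // illustrative container name
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("base image:", builder.FromImage)
    }
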
diff --git a/vendor/github.com/projectatomic/buildah/mount.go b/vendor/github.com/projectatomic/buildah/mount.go
new file mode 100644
index 000000000..4f1ae3c6e
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/mount.go
@@ -0,0 +1,17 @@
+package buildah
+
+// Mount mounts a container's root filesystem in a location which can be
+// accessed from the host, and returns the location.
+func (b *Builder) Mount(label string) (string, error) {
+ mountpoint, err := b.store.Mount(b.ContainerID, label)
+ if err != nil {
+ return "", err
+ }
+ b.MountPoint = mountpoint
+
+ err = b.Save()
+ if err != nil {
+ return "", err
+ }
+ return mountpoint, nil
+}
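
Mount records the returned location in b.MountPoint and persists it via Save, so later steps can find the rootfs again. A minimal sketch of direct use, assuming a *buildah.Builder from NewBuilder and enough privilege to mount the container's root filesystem; the path and contents are illustrative:

    package example

    import (
        "io/ioutil"
        "path/filepath"

        "github.com/projectatomic/buildah"
    )

    // writeMOTD mounts the working container and writes a file into its
    // root filesystem.
    func writeMOTD(builder *buildah.Builder) error {
        mountPoint, err := builder.Mount(builder.MountLabel)
        if err != nil {
            return err
        }
        // Anything written under mountPoint lands in the container's rootfs.
        return ioutil.WriteFile(filepath.Join(mountPoint, "etc/motd"), []byte("hello\n"), 0644)
    }
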
diff --git a/vendor/github.com/projectatomic/buildah/new.go b/vendor/github.com/projectatomic/buildah/new.go
new file mode 100644
index 000000000..82de524c0
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/new.go
@@ -0,0 +1,313 @@
+package buildah
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/transports/alltransports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/opencontainers/selinux/go-selinux"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/openshift/imagebuilder"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah/util"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // BaseImageFakeName is the "name" of a source image which we interpret
+ // as "no image".
+ BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
+
+ // DefaultTransport is a prefix that we apply to an image name if we
+ // can't find one in the local Store, in order to generate a source
+ // reference for the image that we can then copy to the local Store.
+ DefaultTransport = "docker://"
+
+ // minimumTruncatedIDLength is the minimum length of an identifier that
+ // we'll accept as possibly being a truncated image ID.
+ minimumTruncatedIDLength = 3
+)
+
+func reserveSELinuxLabels(store storage.Store, id string) error {
+ if selinux.GetEnabled() {
+ containers, err := store.Containers()
+ if err != nil {
+ return err
+ }
+
+ for _, c := range containers {
+ if id == c.ID {
+ continue
+ } else {
+ b, err := OpenBuilder(store, c.ID)
+ if err != nil {
+ if os.IsNotExist(err) {
+ // Ignore "does not exist" errors, since the containers were probably created by another tool
+ // TODO: we need to read the other containers' json data to reserve their SELinux labels
+ continue
+ }
+ return err
+ }
+ // Prevent containers from using same MCS Label
+ if err := label.ReserveLabel(b.ProcessLabel); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func pullAndFindImage(ctx context.Context, store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
+ ref, err := pullImage(ctx, store, imageName, options, sc)
+ if err != nil {
+ logrus.Debugf("error pulling image %q: %v", imageName, err)
+ return nil, nil, err
+ }
+ img, err := is.Transport.GetStoreImage(store, ref)
+ if err != nil {
+ logrus.Debugf("error reading pulled image %q: %v", imageName, err)
+ return nil, nil, err
+ }
+ return img, ref, nil
+}
+
+func getImageName(name string, img *storage.Image) string {
+ imageName := name
+ if len(img.Names) > 0 {
+ imageName = img.Names[0]
+ // When the image used by the container is a tagged image,
+ // the container name might be set to the original image instead of
+ // the image given in the "from" command line.
+ // This loop fixes that.
+ for _, n := range img.Names {
+ if strings.Contains(n, name) {
+ imageName = n
+ break
+ }
+ }
+ }
+ return imageName
+}
+
+func imageNamePrefix(imageName string) string {
+ prefix := imageName
+ s := strings.Split(imageName, "/")
+ if len(s) > 0 {
+ prefix = s[len(s)-1]
+ }
+ s = strings.Split(prefix, ":")
+ if len(s) > 0 {
+ prefix = s[0]
+ }
+ s = strings.Split(prefix, "@")
+ if len(s) > 0 {
+ prefix = s[0]
+ }
+ return prefix
+}
+
+func imageManifestAndConfig(ctx context.Context, ref types.ImageReference, systemContext *types.SystemContext) (manifest, config []byte, err error) {
+ if ref != nil {
+ src, err := ref.NewImage(ctx, systemContext)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
+ }
+ defer src.Close()
+ config, err := src.ConfigBlob(ctx)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(ref))
+ }
+ manifest, _, err := src.Manifest(ctx)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(ref))
+ }
+ return manifest, config, nil
+ }
+ return nil, nil, nil
+}
+
+func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+ var ref types.ImageReference
+ var img *storage.Image
+ var err error
+ var manifest []byte
+ var config []byte
+
+ if options.FromImage == BaseImageFakeName {
+ options.FromImage = ""
+ }
+ if options.Transport == "" {
+ options.Transport = DefaultTransport
+ }
+
+ systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+
+ for _, image := range util.ResolveName(options.FromImage, options.Registry, systemContext, store) {
+ if len(image) >= minimumTruncatedIDLength {
+ if img, err = store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) {
+ if ref, err = is.Transport.ParseStoreReference(store, img.ID); err != nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
+ }
+ break
+ }
+ }
+
+ if options.PullPolicy == PullAlways {
+ pulledImg, pulledReference, err2 := pullAndFindImage(ctx, store, image, options, systemContext)
+ if err2 != nil {
+ logrus.Debugf("error pulling and reading image %q: %v", image, err2)
+ err = err2
+ continue
+ }
+ ref = pulledReference
+ img = pulledImg
+ break
+ }
+
+ srcRef, err2 := alltransports.ParseImageName(image)
+ if err2 != nil {
+ if options.Transport == "" {
+ logrus.Debugf("error parsing image name %q: %v", image, err2)
+ err = err2
+ continue
+ }
+ transport := options.Transport
+ if transport != DefaultTransport {
+ transport = transport + ":"
+ }
+ srcRef2, err3 := alltransports.ParseImageName(transport + image)
+ if err3 != nil {
+ logrus.Debugf("error parsing image name %q: %v", image, err2)
+ err = err3
+ continue
+ }
+ srcRef = srcRef2
+ }
+
+ destImage, err2 := localImageNameForReference(ctx, store, srcRef, options.FromImage)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, "error computing local image name for %q", transports.ImageName(srcRef))
+ }
+ if destImage == "" {
+ return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ }
+
+ ref, err = is.Transport.ParseStoreReference(store, destImage)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
+ }
+ img, err = is.Transport.GetStoreImage(store, ref)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing {
+ logrus.Debugf("no such image %q: %v", transports.ImageName(ref), err)
+ continue
+ }
+ pulledImg, pulledReference, err2 := pullAndFindImage(ctx, store, image, options, systemContext)
+ if err2 != nil {
+ logrus.Debugf("error pulling and reading image %q: %v", image, err2)
+ err = err2
+ continue
+ }
+ ref = pulledReference
+ img = pulledImg
+ }
+ break
+ }
+
+ if options.FromImage != "" && (ref == nil || img == nil) {
+ // If options.FromImage is set but we ended up
+ // with nil in ref or in img then there was an error that
+ // we should return.
+ return nil, util.GetFailureCause(err, errors.Wrapf(storage.ErrImageUnknown, "no such image %q in registry", options.FromImage))
+ }
+ image := options.FromImage
+ imageID := ""
+ if img != nil {
+ image = getImageName(imageNamePrefix(image), img)
+ imageID = img.ID
+ }
+ if manifest, config, err = imageManifestAndConfig(ctx, ref, systemContext); err != nil {
+ return nil, errors.Wrapf(err, "error reading data from image %q", transports.ImageName(ref))
+ }
+
+ name := "working-container"
+ if options.Container != "" {
+ name = options.Container
+ } else {
+ var err2 error
+ if image != "" {
+ name = imageNamePrefix(image) + "-" + name
+ }
+ suffix := 1
+ tmpName := name
+ for errors.Cause(err2) != storage.ErrContainerUnknown {
+ _, err2 = store.Container(tmpName)
+ if err2 == nil {
+ suffix++
+ tmpName = fmt.Sprintf("%s-%d", name, suffix)
+ }
+ }
+ name = tmpName
+ }
+
+ coptions := storage.ContainerOptions{}
+ container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating container")
+ }
+
+ defer func() {
+ if err != nil {
+ if err2 := store.DeleteContainer(container.ID); err2 != nil {
+ logrus.Errorf("error deleting container %q: %v", container.ID, err2)
+ }
+ }
+ }()
+
+ if err = reserveSELinuxLabels(store, container.ID); err != nil {
+ return nil, err
+ }
+ processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ builder := &Builder{
+ store: store,
+ Type: containerType,
+ FromImage: image,
+ FromImageID: imageID,
+ Config: config,
+ Manifest: manifest,
+ Container: name,
+ ContainerID: container.ID,
+ ImageAnnotations: map[string]string{},
+ ImageCreatedBy: "",
+ ProcessLabel: processLabel,
+ MountLabel: mountLabel,
+ DefaultMountsFilePath: options.DefaultMountsFilePath,
+ CommonBuildOpts: options.CommonBuildOpts,
+ }
+
+ if options.Mount {
+ _, err = builder.Mount(mountLabel)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error mounting build container")
+ }
+ }
+
+ builder.initConfig()
+ err = builder.Save()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error saving builder state")
+ }
+
+ return builder, nil
+}
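
A sketch of reaching this code through the exported buildah.NewBuilder API, assuming storage.DefaultStoreOptions for the store; the base image is illustrative, and CommonBuildOpts must be non-nil because newBuilder reads its LabelOpts field:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/storage"
        "github.com/projectatomic/buildah"
    )

    func main() {
        // Required for the reexec-based helpers used during builds.
        if buildah.InitReexec() {
            return
        }
        store, err := storage.GetStore(storage.DefaultStoreOptions)
        if err != nil {
            panic(err)
        }
        builder, err := buildah.NewBuilder(context.Background(), store, buildah.BuilderOptions{
            FromImage:       "alpine", // illustrative base image
            PullPolicy:      buildah.PullIfMissing,
            CommonBuildOpts: &buildah.CommonBuildOptions{}, // must be non-nil
        })
        if err != nil {
            panic(err)
        }
        defer builder.Delete()
        fmt.Println("working container:", builder.Container)
    }
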
diff --git a/vendor/github.com/projectatomic/buildah/pkg/cli/common.go b/vendor/github.com/projectatomic/buildah/pkg/cli/common.go
new file mode 100644
index 000000000..bead9e6be
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/pkg/cli/common.go
@@ -0,0 +1,130 @@
+package cli
+
+// the cli package contains urfave/cli related structs that help make up
+// the command line for buildah commands. it resides here so other projects
+// that vendor in this code can use them too.
+
+import (
+ "github.com/projectatomic/buildah/imagebuildah"
+ "github.com/urfave/cli"
+)
+
+var (
+ BudFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "authfile",
+ Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
+ },
+ cli.StringSliceFlag{
+ Name: "build-arg",
+ Usage: "`argument=value` to supply to the builder",
+ },
+ cli.StringFlag{
+ Name: "cert-dir",
+ Value: "",
+ Usage: "use certificates at the specified path to access the registry",
+ },
+ cli.StringFlag{
+ Name: "creds",
+ Value: "",
+ Usage: "use `[username[:password]]` for accessing the registry",
+ },
+ cli.StringSliceFlag{
+ Name: "file, f",
+ Usage: "`pathname or URL` of a Dockerfile",
+ },
+ cli.StringFlag{
+ Name: "format",
+ Usage: "`format` of the built image's manifest and metadata",
+ },
+ cli.BoolTFlag{
+ Name: "pull",
+ Usage: "pull the image if not present",
+ },
+ cli.BoolFlag{
+ Name: "pull-always",
+ Usage: "pull the image, even if a version is present",
+ },
+ cli.BoolFlag{
+ Name: "quiet, q",
+ Usage: "refrain from announcing build instructions and image read/write progress",
+ },
+ cli.StringFlag{
+ Name: "runtime",
+ Usage: "`path` to an alternate runtime",
+ Value: imagebuildah.DefaultRuntime,
+ },
+ cli.StringSliceFlag{
+ Name: "runtime-flag",
+ Usage: "add global flags for the container runtime",
+ },
+ cli.StringFlag{
+ Name: "signature-policy",
+ Usage: "`pathname` of signature policy file (not usually used)",
+ },
+ cli.StringSliceFlag{
+ Name: "tag, t",
+ Usage: "`tag` to apply to the built image",
+ },
+ cli.BoolTFlag{
+ Name: "tls-verify",
+ Usage: "require HTTPS and verify certificates when accessing the registry",
+ },
+ }
+
+ FromAndBudFlags = []cli.Flag{
+ cli.StringSliceFlag{
+ Name: "add-host",
+ Usage: "add a custom host-to-IP mapping (host:ip) (default [])",
+ },
+ cli.StringFlag{
+ Name: "cgroup-parent",
+ Usage: "optional parent cgroup for the container",
+ },
+ cli.Uint64Flag{
+ Name: "cpu-period",
+ Usage: "limit the CPU CFS (Completely Fair Scheduler) period",
+ },
+ cli.Int64Flag{
+ Name: "cpu-quota",
+ Usage: "limit the CPU CFS (Completely Fair Scheduler) quota",
+ },
+ cli.Uint64Flag{
+ Name: "cpu-shares",
+ Usage: "CPU shares (relative weight)",
+ },
+ cli.StringFlag{
+ Name: "cpuset-cpus",
+ Usage: "CPUs in which to allow execution (0-3, 0,1)",
+ },
+ cli.StringFlag{
+ Name: "cpuset-mems",
+ Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
+ },
+ cli.StringFlag{
+ Name: "memory, m",
+ Usage: "memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
+ },
+ cli.StringFlag{
+ Name: "memory-swap",
+ Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap",
+ },
+ cli.StringSliceFlag{
+ Name: "security-opt",
+ Usage: "security Options (default [])",
+ },
+ cli.StringFlag{
+ Name: "shm-size",
+ Usage: "size of `/dev/shm`. The format is `<number><unit>`.",
+ Value: "65536k",
+ },
+ cli.StringSliceFlag{
+ Name: "ulimit",
+ Usage: "ulimit options (default [])",
+ },
+ cli.StringSliceFlag{
+ Name: "volume, v",
+ Usage: "bind mount a volume into the container (default [])",
+ },
+ }
+)
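
A sketch of what the package comment describes: a vendoring project wiring BudFlags and FromAndBudFlags into its own urfave/cli command (the app and command names are illustrative):

    package main

    import (
        "fmt"
        "os"

        "github.com/projectatomic/buildah/pkg/cli"
        ucli "github.com/urfave/cli"
    )

    func main() {
        app := ucli.NewApp()
        app.Name = "bud-example"
        app.Commands = []ucli.Command{{
            Name:  "bud",
            Usage: "build an image from a Dockerfile",
            // Reuse the shared flag definitions so a vendoring project
            // stays consistent with buildah's own command line.
            Flags: append(cli.BudFlags, cli.FromAndBudFlags...),
            Action: func(c *ucli.Context) error {
                fmt.Println("tags:", c.StringSlice("tag"))
                return nil
            },
        }}
        if err := app.Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }
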
diff --git a/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go b/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
new file mode 100644
index 000000000..f2159d930
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
@@ -0,0 +1,323 @@
+package parse
+
+// this package contains functions that parse and validate user input;
+// they are shared amongst buildah subcommands and useful to projects
+// that vendor buildah
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/containers/image/types"
+ "github.com/docker/go-units"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah"
+ "github.com/urfave/cli"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+const (
+ // SeccompDefaultPath defines the default seccomp path
+ SeccompDefaultPath = "/usr/share/containers/seccomp.json"
+ // SeccompOverridePath if this exists it overrides the default seccomp path
+ SeccompOverridePath = "/etc/crio/seccomp.json"
+)
+
+// ParseCommonBuildOptions parses the build options from the bud cli
+func ParseCommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
+ var (
+ memoryLimit int64
+ memorySwap int64
+ err error
+ )
+ if c.String("memory") != "" {
+ memoryLimit, err = units.RAMInBytes(c.String("memory"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid value for memory")
+ }
+ }
+ if c.String("memory-swap") != "" {
+ memorySwap, err = units.RAMInBytes(c.String("memory-swap"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid value for memory-swap")
+ }
+ }
+ if len(c.StringSlice("add-host")) > 0 {
+ for _, host := range c.StringSlice("add-host") {
+ if err := validateExtraHost(host); err != nil {
+ return nil, errors.Wrapf(err, "invalid value for add-host")
+ }
+ }
+ }
+ if _, err := units.FromHumanSize(c.String("shm-size")); err != nil {
+ return nil, errors.Wrapf(err, "invalid --shm-size")
+ }
+ if err := parseVolumes(c.StringSlice("volume")); err != nil {
+ return nil, err
+ }
+
+ commonOpts := &buildah.CommonBuildOptions{
+ AddHost: c.StringSlice("add-host"),
+ CgroupParent: c.String("cgroup-parent"),
+ CPUPeriod: c.Uint64("cpu-period"),
+ CPUQuota: c.Int64("cpu-quota"),
+ CPUSetCPUs: c.String("cpuset-cpus"),
+ CPUSetMems: c.String("cpuset-mems"),
+ CPUShares: c.Uint64("cpu-shares"),
+ Memory: memoryLimit,
+ MemorySwap: memorySwap,
+ ShmSize: c.String("shm-size"),
+ Ulimit: c.StringSlice("ulimit"),
+ Volumes: c.StringSlice("volume"),
+ }
+ if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil {
+ return nil, err
+ }
+ return commonOpts, nil
+}
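+
+// Editor's sketch (illustrative, not part of the vendored source): a
+// typical caller invokes ParseCommonBuildOptions from a urfave/cli
+// Action, assuming the flags above are registered on the command:
+//
+//	Action: func(c *cli.Context) error {
+//		commonOpts, err := parse.ParseCommonBuildOptions(c)
+//		if err != nil {
+//			return err
+//		}
+//		// hand commonOpts to buildah, e.g. via BuilderOptions
+//		_ = commonOpts
+//		return nil
+//	},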
+
+func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOptions) error {
+ for _, opt := range securityOpts {
+ if opt == "no-new-privileges" {
+ return errors.Errorf("no-new-privileges is not supported")
+ }
+ con := strings.SplitN(opt, "=", 2)
+ if len(con) != 2 {
+ return errors.Errorf("Invalid --security-opt 1: %q", opt)
+ }
+
+ switch con[0] {
+ case "label":
+ commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1])
+ case "apparmor":
+ commonOpts.ApparmorProfile = con[1]
+ case "seccomp":
+ commonOpts.SeccompProfilePath = con[1]
+ default:
+ return errors.Errorf("Invalid --security-opt 2: %q", opt)
+ }
+
+ }
+
+ if commonOpts.SeccompProfilePath == "" {
+ if _, err := os.Stat(SeccompOverridePath); err == nil {
+ commonOpts.SeccompProfilePath = SeccompOverridePath
+ } else {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "can't check if %q exists", SeccompOverridePath)
+ }
+ if _, err := os.Stat(SeccompDefaultPath); err != nil {
+ if !os.IsNotExist(err) {
+ return errors.Wrapf(err, "can't check if %q exists", SeccompDefaultPath)
+ }
+ } else {
+ commonOpts.SeccompProfilePath = SeccompDefaultPath
+ }
+ }
+ }
+ return nil
+}
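+
+// Editor's note (illustrative, not vendored source): the forms accepted
+// by parseSecurityOpts, as implemented above, are:
+//
+//	--security-opt label=user:USER      -> appended to LabelOpts
+//	--security-opt apparmor=PROFILE     -> sets ApparmorProfile
+//	--security-opt seccomp=/some/path   -> sets SeccompProfilePath
+//	--security-opt no-new-privileges    -> rejected as unsupported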
+
+func parseVolumes(volumes []string) error {
+ if len(volumes) == 0 {
+ return nil
+ }
+ for _, volume := range volumes {
+ arr := strings.SplitN(volume, ":", 3)
+ if len(arr) < 2 {
+ return errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
+ }
+ if err := validateVolumeHostDir(arr[0]); err != nil {
+ return err
+ }
+ if err := validateVolumeCtrDir(arr[1]); err != nil {
+ return err
+ }
+ if len(arr) > 2 {
+ if err := validateVolumeOpts(arr[2]); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
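+
+// Editor's note (illustrative, not vendored source): volume specs take
+// the form host-dir:ctr-dir[:options], for example:
+//
+//	-v /var/lib/data:/data
+//	-v /var/lib/data:/data:ro,Z
+//
+// The host directory must exist and the container path must be absolute.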
+
+func validateVolumeHostDir(hostDir string) error {
+ if _, err := os.Stat(hostDir); err != nil {
+ return errors.Wrapf(err, "error checking path %q", hostDir)
+ }
+ return nil
+}
+
+func validateVolumeCtrDir(ctrDir string) error {
+ if ctrDir[0] != '/' {
+ return errors.Errorf("invalid container directory path %q", ctrDir)
+ }
+ return nil
+}
+
+func validateVolumeOpts(option string) error {
+ var foundRootPropagation, foundRWRO, foundLabelChange int
+ options := strings.Split(option, ",")
+ for _, opt := range options {
+ switch opt {
+ case "rw", "ro":
+ if foundRWRO > 0 {
+ return errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", option)
+ }
+ foundRWRO++
+ case "z", "Z":
+ if foundLabelChange > 0 {
+ return errors.Errorf("invalid options %q, can only specify 1 'z' or 'Z' option", option)
+ }
+ foundLabelChange++
+ case "private", "rprivate", "shared", "rshared", "slave", "rslave":
+ if foundRootPropagation > 0 {
+ return errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private' or '[r]slave' option", option)
+ }
+ foundRootPropagation++
+ default:
+ return errors.Errorf("invalid option type %q", option)
+ }
+ }
+ return nil
+}
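+
+// Editor's note (illustrative, not vendored source): per the checks
+// above, "ro,Z,rprivate" is accepted, while "ro,rw" (two access modes)
+// and "noexec" (unknown option) are rejected.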
+
+// validateExtraHost validates that the specified string is a valid extra host.
+// An extra host is in the form name:ip, where ip must be a valid IPv4 or IPv6 address.
+// It is used by the add-host flag.
+func validateExtraHost(val string) error {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := validateIPAddress(arr[1]); err != nil {
+ return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return nil
+}
+
+// validateIPAddress validates an IP address.
+// It is also used by the dns, ip, and ip6 flags.
+func validateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
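+
+// Editor's note (illustrative, not vendored source):
+//
+//	validateIPAddress("192.168.1.10") -> "192.168.1.10", nil
+//	validateIPAddress("::1")          -> "::1", nil
+//	validateIPAddress("not-an-ip")    -> "", error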
+
+// ValidateFlags searches for StringFlag or StringSliceFlag flags that never had
+// a value set. This commonly occurs when the CLI mistakenly takes the next
+// option and uses it as a value.
+func ValidateFlags(c *cli.Context, flags []cli.Flag) error {
+ re, err := regexp.Compile("^-.+")
+ if err != nil {
+ return errors.Wrap(err, "compiling regex failed")
+ }
+
+ // The --cmd flag can have a following command, e.g. --cmd="--help".
+ // Let's skip this check just for the --cmd flag.
+ for _, flag := range flags {
+ switch reflect.TypeOf(flag).String() {
+ case "cli.StringSliceFlag":
+ {
+ f := flag.(cli.StringSliceFlag)
+ name := strings.Split(f.Name, ",")
+ if f.Name == "cmd" {
+ continue
+ }
+ val := c.StringSlice(name[0])
+ for _, v := range val {
+ if ok := re.MatchString(v); ok {
+ return errors.Errorf("option --%s requires a value", name[0])
+ }
+ }
+ }
+ case "cli.StringFlag":
+ {
+ f := flag.(cli.StringFlag)
+ name := strings.Split(f.Name, ",")
+ if f.Name == "cmd" {
+ continue
+ }
+ val := c.String(name[0])
+ if ok := re.MatchString(val); ok {
+ return errors.Errorf("option --%s requires a value", name[0])
+ }
+ }
+ }
+ }
+ return nil
+}
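+
+// Editor's note (illustrative, not vendored source): ValidateFlags
+// catches invocations such as
+//
+//	buildah bud --creds --tag foo .
+//
+// where the CLI parser consumes "--tag" as the value of --creds; the
+// leading "-" makes the value match the regex above and yields
+// "option --creds requires a value".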
+
+// SystemContextFromOptions returns a SystemContext populated with
+// values from the input parameters provided by the caller, for use in
+// authentication.
+func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
+ ctx := &types.SystemContext{
+ DockerCertPath: c.String("cert-dir"),
+ }
+ if c.IsSet("tls-verify") {
+ ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify")
+ }
+ if c.IsSet("creds") {
+ var err error
+ ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds"))
+ if err != nil {
+ return nil, err
+ }
+ }
+ if c.IsSet("signature-policy") {
+ ctx.SignaturePolicyPath = c.String("signature-policy")
+ }
+ if c.IsSet("authfile") {
+ ctx.AuthFilePath = c.String("authfile")
+ }
+ if c.GlobalIsSet("registries-conf") {
+ ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf")
+ }
+ if c.GlobalIsSet("registries-conf-dir") {
+ ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir")
+ }
+ return ctx, nil
+}
+
+func parseCreds(creds string) (string, string) {
+ if creds == "" {
+ return "", ""
+ }
+ up := strings.SplitN(creds, ":", 2)
+ if len(up) == 1 {
+ return up[0], ""
+ }
+ if up[0] == "" {
+ return "", up[1]
+ }
+ return up[0], up[1]
+}
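+
+// Editor's note (illustrative, not vendored source):
+//
+//	parseCreds("user:pass") -> ("user", "pass")
+//	parseCreds("user")      -> ("user", "")
+//	parseCreds(":pass")     -> ("", "pass")
+//
+// getDockerAuth then prompts for whichever half is missing.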
+
+func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
+ username, password := parseCreds(creds)
+ if username == "" {
+ fmt.Print("Username: ")
+ fmt.Scanln(&username)
+ }
+ if password == "" {
+ fmt.Print("Password: ")
+ termPassword, err := terminal.ReadPassword(0)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not read password from terminal")
+ }
+ password = string(termPassword)
+ }
+
+ return &types.DockerAuthConfig{
+ Username: username,
+ Password: password,
+ }, nil
+}
diff --git a/vendor/github.com/projectatomic/buildah/pull.go b/vendor/github.com/projectatomic/buildah/pull.go
new file mode 100644
index 000000000..9b8578651
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/pull.go
@@ -0,0 +1,186 @@
+package buildah
+
+import (
+ "context"
+ "strings"
+
+ cp "github.com/containers/image/copy"
+ "github.com/containers/image/docker/reference"
+ tarfile "github.com/containers/image/docker/tarfile"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/containers/image/signature"
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/transports/alltransports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/projectatomic/buildah/util"
+ "github.com/sirupsen/logrus"
+)
+
+func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
+ if srcRef == nil {
+ return "", errors.Errorf("reference to image is empty")
+ }
+ split := strings.SplitN(spec, ":", 2)
+ file := split[len(split)-1]
+ var name string
+ switch srcRef.Transport().Name() {
+ case util.DockerArchive:
+ tarSource, err := tarfile.NewSourceFromFile(file)
+ if err != nil {
+ return "", err
+ }
+ manifest, err := tarSource.LoadTarManifest()
+ if err != nil {
+ return "", errors.Errorf("error retrieving manifest.json: %v", err)
+ }
+ // to pull the first image stored in the tar file
+ if len(manifest) == 0 {
+ // use the hex of the digest if no manifest is found
+ name, err = getImageDigest(ctx, srcRef, nil)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ if len(manifest[0].RepoTags) > 0 {
+ name = manifest[0].RepoTags[0]
+ } else {
+ // If the input image has no repotags, we need to feed it a dest anyways
+ name, err = getImageDigest(ctx, srcRef, nil)
+ if err != nil {
+ return "", err
+ }
+ }
+ }
+ case util.OCIArchive:
+ // retrieve the manifest from index.json to access the image name
+ manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
+ if err != nil {
+ return "", errors.Wrapf(err, "error loading manifest for %q", srcRef)
+ }
+ if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
+ return "", errors.Errorf("error, archive doesn't have a name annotation. Cannot store image with no name")
+ }
+ name = manifest.Annotations["org.opencontainers.image.ref.name"]
+ case util.DirTransport:
+ // supports pull from a directory
+ name = split[1]
+ // remove leading "/"
+ if name[:1] == "/" {
+ name = name[1:]
+ }
+ default:
+ ref := srcRef.DockerReference()
+ if ref == nil {
+ name = srcRef.StringWithinTransport()
+ _, err := is.Transport.ParseStoreReference(store, name)
+ if err == nil {
+ return name, nil
+ }
+ if strings.LastIndex(name, "/") != -1 {
+ name = name[strings.LastIndex(name, "/")+1:]
+ _, err = is.Transport.ParseStoreReference(store, name)
+ if err == nil {
+ return name, nil
+ }
+ }
+ return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef))
+ }
+
+ if named, ok := ref.(reference.Named); ok {
+ name = named.Name()
+ if namedTagged, ok := ref.(reference.NamedTagged); ok {
+ name = name + ":" + namedTagged.Tag()
+ }
+ if canonical, ok := ref.(reference.Canonical); ok {
+ name = name + "@" + canonical.Digest().String()
+ }
+ }
+ }
+
+ if _, err := is.Transport.ParseStoreReference(store, name); err != nil {
+ return "", errors.Wrapf(err, "error parsing computed local image name %q", name)
+ }
+ return name, nil
+}
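+
+// Editor's note (illustrative, not vendored source): examples of the
+// local names computed above, by transport:
+//
+//	docker-archive:/tmp/img.tar -> first RepoTag in the tar's manifest
+//	oci-archive:/tmp/img.tar    -> the org.opencontainers.image.ref.name annotation
+//	dir:/tmp/imgdir             -> "tmp/imgdir" (leading "/" stripped)
+//	docker://busybox            -> "docker.io/library/busybox:latest"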
+
+func pullImage(ctx context.Context, store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (types.ImageReference, error) {
+ spec := imageName
+ srcRef, err := alltransports.ParseImageName(spec)
+ if err != nil {
+ if options.Transport == "" {
+ return nil, errors.Wrapf(err, "error parsing image name %q", spec)
+ }
+ transport := options.Transport
+ if transport != DefaultTransport {
+ transport = transport + ":"
+ }
+ spec = transport + spec
+ srcRef2, err2 := alltransports.ParseImageName(spec)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, "error parsing image name %q", spec)
+ }
+ srcRef = srcRef2
+ }
+
+ destName, err := localImageNameForReference(ctx, store, srcRef, spec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
+ }
+ if destName == "" {
+ return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ }
+
+ destRef, err := is.Transport.ParseStoreReference(store, destName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing image name %q", destName)
+ }
+
+ img, err := srcRef.NewImageSource(ctx, sc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error initializing %q as an image source", spec)
+ }
+ img.Close()
+
+ policy, err := signature.DefaultPolicy(sc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error obtaining default signature policy")
+ }
+
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new signature policy context")
+ }
+
+ defer func() {
+ if err2 := policyContext.Destroy(); err2 != nil {
+ logrus.Debugf("error destroying signature policy context: %v", err2)
+ }
+ }()
+
+ logrus.Debugf("copying %q to %q", spec, destName)
+
+ err = cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, options.SystemContext, nil, ""))
+ if err == nil {
+ return destRef, nil
+ }
+ return nil, err
+}
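+
+// Editor's note (illustrative, not vendored source): if the name has no
+// transport prefix, options.Transport is prepended before reparsing, so
+// with Transport set to the package's DefaultTransport (defined
+// elsewhere; assumed "docker://"), the spec "busybox" is retried as
+// "docker://busybox".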
+
+// getImageDigest creates an image object and uses the hex value of the digest as the image ID
+// for parsing the store reference
+func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.SystemContext) (string, error) {
+ newImg, err := src.NewImage(ctx, sc)
+ if err != nil {
+ return "", err
+ }
+ defer newImg.Close()
+
+ digest := newImg.ConfigInfo().Digest
+ if err = digest.Validate(); err != nil {
+ return "", errors.Wrapf(err, "error getting config info")
+ }
+ return "@" + digest.Hex(), nil
+}
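+
+// Editor's note (illustrative, not vendored source): for an image whose
+// config blob digest is sha256:abc123..., getImageDigest returns
+// "@abc123...", which parses as an image-ID reference in local storage.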
diff --git a/vendor/github.com/projectatomic/buildah/run.go b/vendor/github.com/projectatomic/buildah/run.go
new file mode 100644
index 000000000..12312f6a4
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/run.go
@@ -0,0 +1,479 @@
+package buildah
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/docker/docker/profiles/seccomp"
+ units "github.com/docker/go-units"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+const (
+ // DefaultWorkingDir is used if none was specified.
+ DefaultWorkingDir = "/"
+ // DefaultRuntime is the default command to use to run the container.
+ DefaultRuntime = "runc"
+)
+
+const (
+ // DefaultTerminal indicates that this Run invocation should be
+ // connected to a pseudoterminal if we're connected to a terminal.
+ DefaultTerminal = iota
+ // WithoutTerminal indicates that this Run invocation should NOT be
+ // connected to a pseudoterminal.
+ WithoutTerminal
+ // WithTerminal indicates that this Run invocation should be connected
+ // to a pseudoterminal.
+ WithTerminal
+)
+
+// RunOptions can be used to alter how a command is run in the container.
+type RunOptions struct {
+ // Hostname is the hostname we set for the running container.
+ Hostname string
+ // Runtime is the name of the command to run. It should accept the same arguments that runc does.
+ Runtime string
+ // Args adds global arguments for the runtime.
+ Args []string
+ // Mounts are additional mount points which we want to provide.
+ Mounts []specs.Mount
+ // Env is additional environment variables to set.
+ Env []string
+ // User is the user as whom to run the command.
+ User string
+ // WorkingDir is an override for the working directory.
+ WorkingDir string
+ // Shell is default shell to run in a container.
+ Shell string
+ // Cmd is an override for the configured default command.
+ Cmd []string
+ // Entrypoint is an override for the configured entry point.
+ Entrypoint []string
+ // NetworkDisabled puts the container into its own network namespace rather than sharing the host's.
+ NetworkDisabled bool
+ // Terminal provides a way to specify whether or not the command should
+ // be run with a pseudoterminal. By default (DefaultTerminal), a
+ // terminal is used if os.Stdout is connected to a terminal, but that
+ // decision can be overridden by specifying either WithTerminal or
+ // WithoutTerminal.
+ Terminal int
+ // Quiet tells the run to turn off output to stdout.
+ Quiet bool
+}
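+
+// Editor's sketch (illustrative, not part of the vendored source): a
+// minimal Run invocation, assuming b is a *Builder for a build container:
+//
+//	opts := buildah.RunOptions{
+//		Hostname: "builder",
+//		Terminal: buildah.WithoutTerminal,
+//	}
+//	if err := b.Run([]string{"sh", "-c", "echo hello"}, opts); err != nil {
+//		// handle error
+//	}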
+
+func addRlimits(ulimit []string, g *generate.Generator) error {
+ var (
+ ul *units.Ulimit
+ err error
+ )
+
+ for _, u := range ulimit {
+ if ul, err = units.ParseUlimit(u); err != nil {
+ return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u)
+ }
+
+ g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
+ }
+ return nil
+}
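+
+// Editor's note (illustrative, not vendored source): ulimit values are
+// parsed by units.ParseUlimit, e.g. "nofile=1024:2048" becomes
+// RLIMIT_NOFILE with soft limit 1024 and hard limit 2048.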
+
+func addHosts(hosts []string, w io.Writer) error {
+ buf := bufio.NewWriter(w)
+ for _, host := range hosts {
+ fmt.Fprintln(buf, host)
+ }
+ return buf.Flush()
+}
+
+func addHostsToFile(hosts []string, filename string) error {
+ if len(hosts) == 0 {
+ return nil
+ }
+ file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ return addHosts(hosts, file)
+}
+
+func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error {
+ // RESOURCES - CPU
+ if commonOpts.CPUPeriod != 0 {
+ g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod)
+ }
+ if commonOpts.CPUQuota != 0 {
+ g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota)
+ }
+ if commonOpts.CPUShares != 0 {
+ g.SetLinuxResourcesCPUShares(commonOpts.CPUShares)
+ }
+ if commonOpts.CPUSetCPUs != "" {
+ g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs)
+ }
+ if commonOpts.CPUSetMems != "" {
+ g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems)
+ }
+
+ // RESOURCES - MEMORY
+ if commonOpts.Memory != 0 {
+ g.SetLinuxResourcesMemoryLimit(commonOpts.Memory)
+ }
+ if commonOpts.MemorySwap != 0 {
+ g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap)
+ }
+
+ if commonOpts.CgroupParent != "" {
+ g.SetLinuxCgroupsPath(commonOpts.CgroupParent)
+ }
+
+ if err := addRlimits(commonOpts.Ulimit, g); err != nil {
+ return err
+ }
+ if err := addHostsToFile(commonOpts.AddHost, "/etc/hosts"); err != nil {
+ return err
+ }
+
+ logrus.Debugln("Resources:", commonOpts)
+ return nil
+}
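+
+// Editor's note (illustrative, not vendored source): this maps the
+// flags parsed by pkg/parse (e.g. --cpu-period, --memory) onto the OCI
+// runtime spec's linux.resources block via the generate.Generator.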
+
+func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, builtinVolumes, volumeMounts []string, shmSize string) error {
+ // The passed-in mounts matter the most to us.
+ mounts := make([]specs.Mount, len(optionMounts))
+ copy(mounts, optionMounts)
+ haveMount := func(destination string) bool {
+ for _, mount := range mounts {
+ if mount.Destination == destination {
+ // Already have something to mount there.
+ return true
+ }
+ }
+ return false
+ }
+ // Add mounts from the generated list, unless they conflict.
+ for _, specMount := range spec.Mounts {
+ if specMount.Destination == "/dev/shm" {
+ specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize}
+ }
+ if haveMount(specMount.Destination) {
+ // Already have something to mount there, so skip this one.
+ continue
+ }
+ mounts = append(mounts, specMount)
+ }
+ // Add bind mounts for important files, unless they conflict.
+ for _, boundFile := range bindFiles {
+ if haveMount(boundFile) {
+ // Already have something to mount there, so skip this one.
+ continue
+ }
+ mounts = append(mounts, specs.Mount{
+ Source: boundFile,
+ Destination: boundFile,
+ Type: "bind",
+ Options: []string{"rbind", "ro"},
+ })
+ }
+
+ cdir, err := b.store.ContainerDirectory(b.ContainerID)
+ if err != nil {
+ return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
+ }
+
+ // Add secrets mounts
+ mountsFiles := []string{OverrideMountsFile, b.DefaultMountsFilePath}
+ for _, file := range mountsFiles {
+ secretMounts, err := secretMounts(file, b.MountLabel, cdir)
+ if err != nil {
+ logrus.Warn("error mounting secrets, skipping...")
+ continue
+ }
+ for _, mount := range secretMounts {
+ if haveMount(mount.Destination) {
+ continue
+ }
+ mounts = append(mounts, mount)
+ }
+ }
+ // Add temporary copies of the contents of volume locations at the
+ // volume locations, unless we already have something there.
+ for _, volume := range builtinVolumes {
+ if haveMount(volume) {
+ // Already mounting something there, no need to bother.
+ continue
+ }
+ subdir := digest.Canonical.FromString(volume).Hex()
+ volumePath := filepath.Join(cdir, "buildah-volumes", subdir)
+ // If we need to, initialize the volume path's initial contents.
+ if _, err = os.Stat(volumePath); os.IsNotExist(err) {
+ if err = os.MkdirAll(volumePath, 0755); err != nil {
+ return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
+ }
+ if err = label.Relabel(volumePath, b.MountLabel, false); err != nil {
+ return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
+ }
+ srcPath := filepath.Join(mountPoint, volume)
+ if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
+ return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
+ }
+
+ }
+ // Add the bind mount.
+ mounts = append(mounts, specs.Mount{
+ Source: volumePath,
+ Destination: volume,
+ Type: "bind",
+ Options: []string{"bind"},
+ })
+ }
+ // Bind mount volumes given by the user at execution
+ var options []string
+ for _, i := range volumeMounts {
+ spliti := strings.Split(i, ":")
+ if len(spliti) > 2 {
+ options = strings.Split(spliti[2], ",")
+ }
+ if haveMount(spliti[1]) {
+ continue
+ }
+ options = append(options, "rbind")
+ var foundrw, foundro, foundz, foundZ bool
+ var rootProp string
+ for _, opt := range options {
+ switch opt {
+ case "rw":
+ foundrw = true
+ case "ro":
+ foundro = true
+ case "z":
+ foundz = true
+ case "Z":
+ foundZ = true
+ case "private", "rprivate", "slave", "rslave", "shared", "rshared":
+ rootProp = opt
+ }
+ }
+ if !foundrw && !foundro {
+ options = append(options, "rw")
+ }
+ if foundz {
+ if err := label.Relabel(spliti[0], spec.Linux.MountLabel, true); err != nil {
+ return errors.Wrapf(err, "relabel failed %q", spliti[0])
+ }
+ }
+ if foundZ {
+ if err := label.Relabel(spliti[0], spec.Linux.MountLabel, false); err != nil {
+ return errors.Wrapf(err, "relabel failed %q", spliti[0])
+ }
+ }
+ if rootProp == "" {
+ options = append(options, "private")
+ }
+
+ mounts = append(mounts, specs.Mount{
+ Destination: spliti[1],
+ Type: "bind",
+ Source: spliti[0],
+ Options: options,
+ })
+ }
+ // Set the list in the spec.
+ spec.Mounts = mounts
+ return nil
+}
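+
+// Editor's note (illustrative, not vendored source): the precedence
+// implied by haveMount above is first-wins: caller-supplied option
+// mounts, then the generated spec mounts, then bind files, then secret
+// mounts, then built-in volumes, and finally user-specified volumes.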
+
+// Run runs the specified command in the container's root filesystem.
+func (b *Builder) Run(command []string, options RunOptions) error {
+ var user specs.User
+ path, err := ioutil.TempDir(os.TempDir(), Package)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("using %q to hold bundle data", path)
+ defer func() {
+ if err2 := os.RemoveAll(path); err2 != nil {
+ logrus.Errorf("error removing %q: %v", path, err2)
+ }
+ }()
+ g := generate.New()
+
+ for _, envSpec := range append(b.Env(), options.Env...) {
+ env := strings.SplitN(envSpec, "=", 2)
+ if len(env) > 1 {
+ g.AddProcessEnv(env[0], env[1])
+ }
+ }
+
+ if b.CommonBuildOpts == nil {
+ return errors.Errorf("Invalid format on container you must recreate the container")
+ }
+
+ if err := addCommonOptsToSpec(b.CommonBuildOpts, &g); err != nil {
+ return err
+ }
+
+ if len(command) > 0 {
+ g.SetProcessArgs(command)
+ } else {
+ cmd := b.Cmd()
+ if len(options.Cmd) > 0 {
+ cmd = options.Cmd
+ }
+ entrypoint := b.Entrypoint()
+ if len(options.Entrypoint) > 0 {
+ entrypoint = options.Entrypoint
+ }
+ g.SetProcessArgs(append(entrypoint, cmd...))
+ }
+ if options.WorkingDir != "" {
+ g.SetProcessCwd(options.WorkingDir)
+ } else if b.WorkDir() != "" {
+ g.SetProcessCwd(b.WorkDir())
+ }
+ if options.Hostname != "" {
+ g.SetHostname(options.Hostname)
+ } else if b.Hostname() != "" {
+ g.SetHostname(b.Hostname())
+ }
+ g.SetProcessSelinuxLabel(b.ProcessLabel)
+ g.SetLinuxMountLabel(b.MountLabel)
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err2 := b.Unmount(); err2 != nil {
+ logrus.Errorf("error unmounting container: %v", err2)
+ }
+ }()
+ for _, mp := range []string{
+ "/proc/kcore",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/proc/scsi",
+ "/sys/firmware",
+ } {
+ g.AddLinuxMaskedPaths(mp)
+ }
+
+ for _, rp := range []string{
+ "/proc/asound",
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger",
+ } {
+ g.AddLinuxReadonlyPaths(rp)
+ }
+ g.SetRootPath(mountPoint)
+ switch options.Terminal {
+ case DefaultTerminal:
+ g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
+ case WithTerminal:
+ g.SetProcessTerminal(true)
+ case WithoutTerminal:
+ g.SetProcessTerminal(false)
+ }
+ if !options.NetworkDisabled {
+ if err = g.RemoveLinuxNamespace("network"); err != nil {
+ return errors.Wrapf(err, "error removing network namespace for run")
+ }
+ }
+ user, err = b.user(mountPoint, options.User)
+ if err != nil {
+ return err
+ }
+ g.SetProcessUID(user.UID)
+ g.SetProcessGID(user.GID)
+ spec := g.Spec()
+ if spec.Process.Cwd == "" {
+ spec.Process.Cwd = DefaultWorkingDir
+ }
+ if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil {
+ return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
+ }
+
+ // Security options
+ g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)
+
+ // HANDLE SECCOMP
+ if b.CommonBuildOpts.SeccompProfilePath != "unconfined" {
+ if b.CommonBuildOpts.SeccompProfilePath != "" {
+ seccompProfile, err := ioutil.ReadFile(b.CommonBuildOpts.SeccompProfilePath)
+ if err != nil {
+ return errors.Wrapf(err, "opening seccomp profile (%s) failed", b.CommonBuildOpts.SeccompProfilePath)
+ }
+ seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
+ if err != nil {
+ return errors.Wrapf(err, "loading seccomp profile (%s) failed", b.CommonBuildOpts.SeccompProfilePath)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ } else {
+ seccompConfig, err := seccomp.GetDefaultProfile(spec)
+ if err != nil {
+ return errors.Wrapf(err, "loading seccomp profile (%s) failed", b.CommonBuildOpts.SeccompProfilePath)
+ }
+ spec.Linux.Seccomp = seccompConfig
+ }
+ }
+
+ cgroupMnt := specs.Mount{
+ Destination: "/sys/fs/cgroup",
+ Type: "cgroup",
+ Source: "cgroup",
+ Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
+ }
+ g.AddMount(cgroupMnt)
+
+ bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"}
+ err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize)
+ if err != nil {
+ return errors.Wrapf(err, "error resolving mountpoints for container")
+ }
+ specbytes, err := json.Marshal(spec)
+ if err != nil {
+ return err
+ }
+ err = ioutils.AtomicWriteFile(filepath.Join(path, "config.json"), specbytes, 0600)
+ if err != nil {
+ return errors.Wrapf(err, "error storing runtime configuration")
+ }
+ logrus.Debugf("config = %v", string(specbytes))
+ runtime := options.Runtime
+ if runtime == "" {
+ runtime = DefaultRuntime
+ }
+ args := append(options.Args, "run", "-b", path, Package+"-"+b.ContainerID)
+ cmd := exec.Command(runtime, args...)
+ cmd.Dir = mountPoint
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ if options.Quiet {
+ cmd.Stdout = nil
+ }
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ logrus.Debugf("error running runc %v: %v", spec.Process.Args, err)
+ }
+ return err
+}
diff --git a/vendor/github.com/projectatomic/buildah/secrets.go b/vendor/github.com/projectatomic/buildah/secrets.go
new file mode 100644
index 000000000..087bf6ba5
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/secrets.go
@@ -0,0 +1,198 @@
+package buildah
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ rspec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // DefaultMountsFile holds the default mount paths in the form
+ // "host_path:container_path"
+ DefaultMountsFile = "/usr/share/containers/mounts.conf"
+ // OverrideMountsFile holds the default mount paths in the form
+ // "host_path:container_path" overriden by the user
+ OverrideMountsFile = "/etc/containers/mounts.conf"
+)
+
+// secretData stores the name of the file and the content read from it
+type secretData struct {
+ name string
+ data []byte
+}
+
+// saveTo saves secret data to the given directory
+func (s secretData) saveTo(dir string) error {
+ path := filepath.Join(dir, s.name)
+ if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return ioutil.WriteFile(path, s.data, 0700)
+}
+
+func readAll(root, prefix string) ([]secretData, error) {
+ path := filepath.Join(root, prefix)
+
+ data := []secretData{}
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return data, nil
+ }
+
+ return nil, err
+ }
+
+ for _, f := range files {
+ fileData, err := readFile(root, filepath.Join(prefix, f.Name()))
+ if err != nil {
+ // If the file does not exist, it might be a dangling
+ // symlink; ignore the error
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ data = append(data, fileData...)
+ }
+
+ return data, nil
+}
+
+func readFile(root, name string) ([]secretData, error) {
+ path := filepath.Join(root, name)
+
+ s, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ if s.IsDir() {
+ dirData, err := readAll(root, name)
+ if err != nil {
+ return nil, err
+ }
+ return dirData, nil
+ }
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return []secretData{{name: name, data: bytes}}, nil
+}
+
+func getHostSecretData(hostDir string) ([]secretData, error) {
+ var allSecrets []secretData
+ hostSecrets, err := readAll(hostDir, "")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir)
+ }
+ return append(allSecrets, hostSecrets...), nil
+}
+
+func getMounts(filePath string) []string {
+ file, err := os.Open(filePath)
+ if err != nil {
+ logrus.Warnf("file %q not found, skipping...", filePath)
+ return nil
+ }
+ defer file.Close()
+ scanner := bufio.NewScanner(file)
+ if err = scanner.Err(); err != nil {
+ logrus.Warnf("error reading file %q, skipping...", filePath)
+ return nil
+ }
+ var mounts []string
+ for scanner.Scan() {
+ mounts = append(mounts, scanner.Text())
+ }
+ return mounts
+}
+
+// getMountsMap separates the host:container paths
+func getMountsMap(path string) (string, string, error) {
+ arr := strings.SplitN(path, ":", 2)
+ if len(arr) == 2 {
+ return arr[0], arr[1], nil
+ }
+ return "", "", errors.Errorf("unable to get host and container dir")
+}
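+
+// Editor's note (illustrative, not vendored source): a mounts.conf line
+// such as
+//
+//	/usr/share/rhel/secrets:/run/secrets
+//
+// splits into hostDir "/usr/share/rhel/secrets" and ctrDir "/run/secrets".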
+
+// secretMounts copies the contents of the host directory to the
+// container directory and returns a list of mounts
+func secretMounts(filePath, mountLabel, containerWorkingDir string) ([]rspec.Mount, error) {
+ var mounts []rspec.Mount
+ defaultMountsPaths := getMounts(filePath)
+ for _, path := range defaultMountsPaths {
+ hostDir, ctrDir, err := getMountsMap(path)
+ if err != nil {
+ return nil, err
+ }
+ // skip if the hostDir path doesn't exist
+ if _, err = os.Stat(hostDir); os.IsNotExist(err) {
+ logrus.Warnf("%q doesn't exist, skipping", hostDir)
+ continue
+ }
+
+ ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
+ if err = os.RemoveAll(ctrDirOnHost); err != nil {
+ return nil, fmt.Errorf("remove container directory failed: %v", err)
+ }
+
+ if err = os.MkdirAll(ctrDirOnHost, 0755); err != nil {
+ return nil, fmt.Errorf("making container directory failed: %v", err)
+ }
+
+ hostDir, err = resolveSymbolicLink(hostDir)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := getHostSecretData(hostDir)
+ if err != nil {
+ return nil, errors.Wrapf(err, "getting host secret data failed")
+ }
+ for _, s := range data {
+ if err := s.saveTo(ctrDirOnHost); err != nil {
+ return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOnHost)
+ }
+ }
+
+ err = label.Relabel(ctrDirOnHost, mountLabel, false)
+ if err != nil {
+ return nil, errors.Wrap(err, "error applying correct labels")
+ }
+
+ m := rspec.Mount{
+ Source: ctrDirOnHost,
+ Destination: ctrDir,
+ Type: "bind",
+ Options: []string{"bind"},
+ }
+
+ mounts = append(mounts, m)
+ }
+ return mounts, nil
+}
+
+// resolveSymbolicLink resolves a possible symlink path. If the path is
+// a symlink, it returns the resolved path; otherwise it returns the
+// original path.
+func resolveSymbolicLink(path string) (string, error) {
+ info, err := os.Lstat(path)
+ if err != nil {
+ return "", err
+ }
+ if info.Mode()&os.ModeSymlink != os.ModeSymlink {
+ return path, nil
+ }
+ return filepath.EvalSymlinks(path)
+}
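+
+// Editor's note (illustrative, not vendored source): if /etc/foo is a
+// symlink to /usr/etc/foo, resolveSymbolicLink returns "/usr/etc/foo";
+// a regular directory is returned unchanged.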
diff --git a/vendor/github.com/projectatomic/buildah/unmount.go b/vendor/github.com/projectatomic/buildah/unmount.go
new file mode 100644
index 000000000..e1578bf71
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/unmount.go
@@ -0,0 +1,11 @@
+package buildah
+
+// Unmount unmounts a build container.
+func (b *Builder) Unmount() error {
+ err := b.store.Unmount(b.ContainerID)
+ if err == nil {
+ b.MountPoint = ""
+ err = b.Save()
+ }
+ return err
+}
diff --git a/vendor/github.com/projectatomic/buildah/util.go b/vendor/github.com/projectatomic/buildah/util.go
new file mode 100644
index 000000000..33b5b9e83
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/util.go
@@ -0,0 +1,34 @@
+package buildah
+
+import (
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/reexec"
+)
+
+var (
+ // copyWithTar defines the copy method to use.
+ copyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+ copyFileWithTar = chrootarchive.NewArchiver(nil).CopyFileWithTar
+ untarPath = chrootarchive.NewArchiver(nil).UntarPath
+)
+
+// InitReexec is a wrapper for reexec.Init(). It should be called at
+// the start of main(), and if it returns true, main() should return
+// immediately.
+func InitReexec() bool {
+ return reexec.Init()
+}
+
+func copyStringStringMap(m map[string]string) map[string]string {
+ n := map[string]string{}
+ for k, v := range m {
+ n[k] = v
+ }
+ return n
+}
+
+func copyStringSlice(s []string) []string {
+ t := make([]string, len(s))
+ copy(t, s)
+ return t
+}
diff --git a/vendor/github.com/projectatomic/buildah/util/util.go b/vendor/github.com/projectatomic/buildah/util/util.go
new file mode 100644
index 000000000..49d6fcc4e
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/util/util.go
@@ -0,0 +1,221 @@
+package util
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/image/directory"
+ dockerarchive "github.com/containers/image/docker/archive"
+ "github.com/containers/image/docker/reference"
+ ociarchive "github.com/containers/image/oci/archive"
+ "github.com/containers/image/pkg/sysregistries"
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/tarball"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ minimumTruncatedIDLength = 3
+)
+
+var (
+ // RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
+ // to prepend to image names that only contain a single path component.
+ RegistryDefaultPathPrefix = map[string]string{
+ "index.docker.io": "library",
+ "docker.io": "library",
+ }
+ // Transports contains the possible transports used for images
+ Transports = map[string]string{
+ dockerarchive.Transport.Name(): "",
+ ociarchive.Transport.Name(): "",
+ directory.Transport.Name(): "",
+ tarball.Transport.Name(): "",
+ }
+ // DockerArchive is the transport we prepend to an image name
+ // when saving to docker-archive
+ DockerArchive = dockerarchive.Transport.Name()
+ // OCIArchive is the transport we prepend to an image name
+ // when saving to oci-archive
+ OCIArchive = ociarchive.Transport.Name()
+ // DirTransport is the transport for pushing and pulling
+ // images to and from a directory
+ DirTransport = directory.Transport.Name()
+ // TarballTransport is the transport for importing a tar archive
+ // and creating a filesystem image
+ TarballTransport = tarball.Transport.Name()
+)
+
+// ResolveName checks if name is a valid image name, and if that name doesn't include a domain
+// portion, returns a list of the names which it might correspond to in the registries.
+func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) []string {
+ if name == "" {
+ return nil
+ }
+
+ // Maybe it's a truncated image ID. Don't prepend a registry name, then.
+ if len(name) >= minimumTruncatedIDLength {
+ if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
+ // It's a truncated version of the ID of an image that's present in local storage;
+ // we need to expand the ID.
+ return []string{img.ID}
+ }
+ }
+
+ // If the image is from a different transport
+ split := strings.SplitN(name, ":", 2)
+ if len(split) == 2 {
+ if _, ok := Transports[split[0]]; ok {
+ return []string{split[1]}
+ }
+ }
+
+ // If the image name already included a domain component, we're done.
+ named, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return []string{name}
+ }
+ if named.String() == name {
+ // Parsing produced the same result, so there was a domain name in there to begin with.
+ return []string{name}
+ }
+ if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" {
+ // If this domain can cause us to insert something in the middle, check if that happened.
+ repoPath := reference.Path(named)
+ domain := reference.Domain(named)
+ defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
+ if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):]) == name {
+ // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
+ return []string{name}
+ }
+ }
+
+ // Figure out the list of registries.
+ registries, err := sysregistries.GetRegistries(sc)
+ if err != nil {
+ logrus.Debugf("unable to complete image name %q: %v", name, err)
+ return []string{name}
+ }
+ if sc.DockerInsecureSkipTLSVerify {
+ if unverifiedRegistries, err := sysregistries.GetInsecureRegistries(sc); err == nil {
+ registries = append(registries, unverifiedRegistries...)
+ }
+ }
+
+ // Create all of the combinations. Some registries need an additional component added, so
+ // use our lookaside map to keep track of them. If there are no configured registries, at
+ // least return the name as it was passed to us.
+ candidates := []string{}
+ for _, registry := range append([]string{firstRegistry}, registries...) {
+ if registry == "" {
+ continue
+ }
+ middle := ""
+ if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && strings.IndexRune(name, '/') == -1 {
+ middle = prefix
+ }
+ candidate := path.Join(registry, middle, name)
+ candidates = append(candidates, candidate)
+ }
+ if len(candidates) == 0 {
+ candidates = append(candidates, name)
+ }
+ return candidates
+}
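+
+// Editor's note (illustrative, not vendored source): with registries
+// ["docker.io", "quay.io"] configured, ResolveName("fedora", ...)
+// yields candidates like
+//
+//	docker.io/library/fedora
+//	quay.io/fedora
+//
+// because docker.io has a default "library" path prefix for
+// single-component names.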
+
+// ExpandNames takes unqualified names, parses them as image names, and returns
+// the fully expanded result, including a tag. Names which don't include a registry
+// name will be marked for the most-preferred registry (i.e., the first one in our
+// configuration).
+func ExpandNames(names []string) ([]string, error) {
+ expanded := make([]string, 0, len(names))
+ for _, n := range names {
+ name, err := reference.ParseNormalizedNamed(n)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing name %q", n)
+ }
+ name = reference.TagNameOnly(name)
+ tag := ""
+ digest := ""
+ if tagged, ok := name.(reference.NamedTagged); ok {
+ tag = ":" + tagged.Tag()
+ }
+ if digested, ok := name.(reference.Digested); ok {
+ digest = "@" + digested.Digest().String()
+ }
+ expanded = append(expanded, name.Name()+tag+digest)
+ }
+ return expanded, nil
+}
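+
+// Editor's note (illustrative, not vendored source):
+//
+//	ExpandNames([]string{"fedora"}) -> ["docker.io/library/fedora:latest"]
+//
+// reference.TagNameOnly supplies the ":latest" tag when none is given.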
+
+// FindImage locates the locally-stored image which corresponds to a given name.
+func FindImage(store storage.Store, image string) (*storage.Image, error) {
+ var img *storage.Image
+ ref, err := is.Transport.ParseStoreReference(store, image)
+ if err == nil {
+ img, err = is.Transport.GetStoreImage(store, ref)
+ }
+ if err != nil {
+ img2, err2 := store.Image(image)
+ if err2 != nil {
+ if ref == nil {
+ return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
+ }
+ return nil, errors.Wrapf(err, "unable to locate image %q", image)
+ }
+ img = img2
+ }
+ return img, nil
+}
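+
+// Editor's note (illustrative, not vendored source): FindImage first
+// parses image as a storage reference (e.g. "docker.io/library/busybox:latest");
+// failing that, it falls back to a direct lookup by image ID or ID prefix.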
+
+// AddImageNames adds the specified names to the specified image.
+func AddImageNames(store storage.Store, image *storage.Image, addNames []string) error {
+ names, err := ExpandNames(addNames)
+ if err != nil {
+ return err
+ }
+ err = store.SetNames(image.ID, append(image.Names, names...))
+ if err != nil {
+ return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
+ }
+ return nil
+}
+
+// GetFailureCause checks the type of the error "err" and returns a new
+// error that reflects the reason for the failure.
+// If the type of err is not a familiar one, the error "defaultError" is returned.
+func GetFailureCause(err, defaultError error) error {
+ switch nErr := errors.Cause(err).(type) {
+ case errcode.Errors:
+ return err
+ case errcode.Error, *url.Error:
+ return nErr
+ default:
+ return defaultError
+ }
+}
+
+// GetLocalTime discovers the UTC offset and adds it to the passed-in
+// time to arrive at the local time.
+func GetLocalTime(localTime time.Time) time.Time {
+ t := time.Now()
+ _, offset := t.Local().Zone()
+ localTime = localTime.Add(time.Second * time.Duration(offset))
+ return localTime
+}
+
+// WriteError writes `lastError` into `w` if it is not nil and returns the next error `err`
+func WriteError(w io.Writer, err error, lastError error) error {
+ if lastError != nil {
+ fmt.Fprintln(w, lastError)
+ }
+ return err
+}
diff --git a/vendor/github.com/projectatomic/buildah/vendor.conf b/vendor/github.com/projectatomic/buildah/vendor.conf
new file mode 100644
index 000000000..be0b04e4a
--- /dev/null
+++ b/vendor/github.com/projectatomic/buildah/vendor.conf
@@ -0,0 +1,57 @@
+github.com/BurntSushi/toml master
+github.com/Nvveen/Gotty master
+github.com/blang/semver master
+github.com/containers/image master
+github.com/containers/storage master
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
+github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
+github.com/docker/engine-api master
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
+github.com/fsouza/go-dockerclient master
+github.com/ghodss/yaml master
+github.com/golang/glog master
+github.com/gorilla/context master
+github.com/gorilla/mux master
+github.com/hashicorp/go-cleanhttp master
+github.com/imdario/mergo master
+github.com/mattn/go-runewidth master
+github.com/mattn/go-shellwords master
+github.com/mistifyio/go-zfs master
+github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
+github.com/mtrmac/gpgme master
+github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
+github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/runc master
+github.com/opencontainers/runtime-spec v1.0.0
+github.com/opencontainers/runtime-tools master
+github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
+github.com/openshift/imagebuilder master
+github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/pborman/uuid master
+github.com/pkg/errors master
+github.com/sirupsen/logrus master
+github.com/syndtr/gocapability master
+github.com/tchap/go-patricia master
+github.com/urfave/cli master
+github.com/vbatts/tar-split v0.10.2
+golang.org/x/crypto master
+golang.org/x/net master
+golang.org/x/sys master
+golang.org/x/text master
+gopkg.in/cheggaaa/pb.v1 v1.0.13
+gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
+k8s.io/apimachinery master
+k8s.io/client-go master
+k8s.io/kubernetes master
+github.com/hashicorp/go-multierror master
+github.com/hashicorp/errwrap master
+github.com/xeipuuv/gojsonschema master
+github.com/xeipuuv/gojsonreference master
+github.com/containerd/continuity master
+github.com/gogo/protobuf master
+github.com/xeipuuv/gojsonpointer master
+github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
+github.com/projectatomic/libpod master
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
index 53bcb024d..a62741814 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -43,7 +43,7 @@ func (e VersionError) Error() string {
if e.minimum != "" {
format += e.minimum + ": "
} else {
- format += "2.1.0: "
+ format += "2.2.0: "
}
format += "detected %d.%d.%d"
return fmt.Sprintf(format, verMajor, verMinor, verMicro)
@@ -76,8 +76,8 @@ type ScmpSyscall int32
const (
// Valid architectures recognized by libseccomp
- // ARM64 and all MIPS architectures are unsupported by versions of the
- // library before v2.2 and will return errors if used
+ // PowerPC and S390(x) architectures are unavailable below library version
+ // v2.3.0 and will return errors if used with incompatible libraries
// ArchInvalid is a placeholder to ensure uninitialized ScmpArch
// variables are invalid
@@ -211,7 +211,7 @@ func GetArchFromString(arch string) (ScmpArch, error) {
case "s390x":
return ArchS390X, nil
default:
- return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
+ return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch)
}
}
@@ -255,7 +255,7 @@ func (a ScmpArch) String() string {
case ArchInvalid:
return "Invalid architecture"
default:
- return "Unknown architecture"
+ return fmt.Sprintf("Unknown architecture %#x", uint(a))
}
}
@@ -279,7 +279,7 @@ func (a ScmpCompareOp) String() string {
case CompareInvalid:
return "Invalid comparison operator"
default:
- return "Unrecognized comparison operator"
+ return fmt.Sprintf("Unrecognized comparison operator %#x", uint(a))
}
}
@@ -298,7 +298,7 @@ func (a ScmpAction) String() string {
case ActAllow:
return "Action: Allow system call"
default:
- return "Unrecognized Action"
+ return fmt.Sprintf("Unrecognized Action %#x", uint(a))
}
}
@@ -324,7 +324,7 @@ func (a ScmpAction) GetReturnCode() int16 {
// GetLibraryVersion returns the version of the library the bindings are built
// against.
// The version is formatted as follows: Major.Minor.Micro
-func GetLibraryVersion() (major, minor, micro int) {
+func GetLibraryVersion() (major, minor, micro uint) {
return verMajor, verMinor, verMicro
}
@@ -350,7 +350,7 @@ func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
if cString == nil {
- return "", fmt.Errorf("could not resolve syscall name")
+ return "", fmt.Errorf("could not resolve syscall name for %#x", int32(s))
}
defer C.free(unsafe.Pointer(cString))
@@ -373,7 +373,7 @@ func GetSyscallFromName(name string) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name(cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall")
+ return 0, fmt.Errorf("could not resolve name to syscall: %q", name)
}
return ScmpSyscall(result), nil
@@ -397,7 +397,7 @@ func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
if result == scmpError {
- return 0, fmt.Errorf("could not resolve name to syscall")
+ return 0, fmt.Errorf("could not resolve name to syscall: %q on %v", name, arch)
}
return ScmpSyscall(result), nil
@@ -426,9 +426,9 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo
if comparison == CompareInvalid {
return condStruct, fmt.Errorf("invalid comparison operator")
} else if arg > 5 {
- return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
+ return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg)
} else if len(values) > 2 {
- return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
+ return condStruct, fmt.Errorf("conditions can have at most 2 arguments (%d given)", len(values))
} else if len(values) == 0 {
return condStruct, fmt.Errorf("must provide at least one value to compare against")
}
@@ -494,6 +494,13 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
filter.valid = true
runtime.SetFinalizer(filter, filterFinalizer)
+ // Enable TSync so all goroutines will receive the same rules
+ // If the kernel does not support TSYNC, allow us to continue without error
+ if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP {
+ filter.Release()
+ return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err)
+ }
+
return filter, nil
}
@@ -550,7 +557,7 @@ func (f *ScmpFilter) Release() {
// The source filter src will be released as part of the process, and will no
// longer be usable or valid after this call.
// To be merged, filters must NOT share any architectures, and all their
-// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
+// attributes (Default Action, Bad Arch Action, and No New Privs bools)
// must match.
// The filter src will be merged into the filter this is called on.
// The architectures of the src filter not present in the destination, and all
@@ -723,30 +730,6 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
return true, nil
}
-// GetTsyncBit returns whether Thread Synchronization will be enabled on the
-// filter being loaded, or an error if an issue was encountered retrieving the
-// value.
-// Thread Sync ensures that all members of the thread group of the calling
-// process will share the same Seccomp filter set.
-// Tsync is a fairly recent addition to the Linux kernel and older kernels
-// lack support. If the running kernel does not support Tsync and it is
-// requested in a filter, Libseccomp will not enable TSync support and will
-// proceed as normal.
-// This function is unavailable before v2.2 of libseccomp and will return an
-// error.
-func (f *ScmpFilter) GetTsyncBit() (bool, error) {
- tSync, err := f.getFilterAttr(filterAttrTsync)
- if err != nil {
- return false, err
- }
-
- if tSync == 0 {
- return false, nil
- }
-
- return true, nil
-}
-
// SetBadArchAction sets the default action taken on a syscall for an
// architecture not in the filter, or an error if an issue was encountered
// setting the value.
@@ -773,27 +756,6 @@ func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
return f.setFilterAttr(filterAttrNNP, toSet)
}
-// SetTsync sets whether Thread Synchronization will be enabled on the filter
-// being loaded. Returns an error if setting Tsync failed, or the filter is
-// invalid.
-// Thread Sync ensures that all members of the thread group of the calling
-// process will share the same Seccomp filter set.
-// Tsync is a fairly recent addition to the Linux kernel and older kernels
-// lack support. If the running kernel does not support Tsync and it is
-// requested in a filter, Libseccomp will not enable TSync support and will
-// proceed as normal.
-// This function is unavailable before v2.2 of libseccomp and will return an
-// error.
-func (f *ScmpFilter) SetTsync(enable bool) error {
- var toSet C.uint32_t = 0x0
-
- if enable {
- toSet = 0x1
- }
-
- return f.setFilterAttr(filterAttrTsync, toSet)
-}
-
// SetSyscallPriority sets a syscall's priority.
// This provides a hint to the filter generator in libseccomp about the
// importance of this syscall. High-priority syscalls are placed
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
index b0caac91b..dbac0cc30 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -20,43 +20,15 @@ import (
#include <seccomp.h>
#if SCMP_VER_MAJOR < 2
-#error Minimum supported version of Libseccomp is v2.1.0
-#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
-#error Minimum supported version of Libseccomp is v2.1.0
+#error Minimum supported version of Libseccomp is v2.2.0
+#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
+#error Minimum supported version of Libseccomp is v2.2.0
#endif
#define ARCH_BAD ~0
const uint32_t C_ARCH_BAD = ARCH_BAD;
-#ifndef SCMP_ARCH_AARCH64
-#define SCMP_ARCH_AARCH64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS
-#define SCMP_ARCH_MIPS ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS64
-#define SCMP_ARCH_MIPS64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPS64N32
-#define SCMP_ARCH_MIPS64N32 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL
-#define SCMP_ARCH_MIPSEL ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL64
-#define SCMP_ARCH_MIPSEL64 ARCH_BAD
-#endif
-
-#ifndef SCMP_ARCH_MIPSEL64N32
-#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
-#endif
-
#ifndef SCMP_ARCH_PPC
#define SCMP_ARCH_PPC ARCH_BAD
#endif
@@ -101,12 +73,6 @@ const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
-// If TSync is not supported, make sure it doesn't map to a supported filter attribute
-// Don't worry about major version < 2, the minimum version checks should catch that case
-#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
-#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
-#endif
-
const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
@@ -124,25 +90,61 @@ const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
const int C_VERSION_MINOR = SCMP_VER_MINOR;
const int C_VERSION_MICRO = SCMP_VER_MICRO;
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR >= 3
+unsigned int get_major_version()
+{
+ return seccomp_version()->major;
+}
+
+unsigned int get_minor_version()
+{
+ return seccomp_version()->minor;
+}
+
+unsigned int get_micro_version()
+{
+ return seccomp_version()->micro;
+}
+#else
+unsigned int get_major_version()
+{
+ return (unsigned int)C_VERSION_MAJOR;
+}
+
+unsigned int get_minor_version()
+{
+ return (unsigned int)C_VERSION_MINOR;
+}
+
+unsigned int get_micro_version()
+{
+ return (unsigned int)C_VERSION_MICRO;
+}
+#endif
+
typedef struct scmp_arg_cmp* scmp_cast_t;
-// Wrapper to create an scmp_arg_cmp struct
-void*
-make_struct_arg_cmp(
- unsigned int arg,
- int compare,
- uint64_t a,
- uint64_t b
- )
+void* make_arg_cmp_array(unsigned int length)
{
- struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));
+ return calloc(length, sizeof(struct scmp_arg_cmp));
+}
- s->arg = arg;
- s->op = compare;
- s->datum_a = a;
- s->datum_b = b;
+// Wrapper to add an scmp_arg_cmp struct to an existing arg_cmp array
+void add_struct_arg_cmp(
+ struct scmp_arg_cmp* arr,
+ unsigned int pos,
+ unsigned int arg,
+ int compare,
+ uint64_t a,
+ uint64_t b
+ )
+{
+ arr[pos].arg = arg;
+ arr[pos].op = compare;
+ arr[pos].datum_a = a;
+ arr[pos].datum_b = b;
- return s;
+ return;
}
*/
import "C"
@@ -177,23 +179,23 @@ var (
// Error thrown on bad filter context
errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
// Constants representing library major, minor, and micro versions
- verMajor = int(C.C_VERSION_MAJOR)
- verMinor = int(C.C_VERSION_MINOR)
- verMicro = int(C.C_VERSION_MICRO)
+ verMajor = uint(C.get_major_version())
+ verMinor = uint(C.get_minor_version())
+ verMicro = uint(C.get_micro_version())
)
// Nonexported functions
// Check if library version is greater than or equal to the given one
-func checkVersionAbove(major, minor, micro int) bool {
+func checkVersionAbove(major, minor, micro uint) bool {
return (verMajor > major) ||
(verMajor == major && verMinor > minor) ||
(verMajor == major && verMinor == minor && verMicro >= micro)
}
-// Ensure that the library is supported, i.e. >= 2.1.0.
+// Ensure that the library is supported, i.e. >= 2.2.0.
func ensureSupportedVersion() error {
- if !checkVersionAbove(2, 1, 0) {
+ if !checkVersionAbove(2, 2, 0) {
return VersionError{}
}
return nil
@@ -215,13 +217,6 @@ func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
return 0x0, errBadFilter
}
- if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
- return 0x0, VersionError{
- message: "thread synchronization attribute is not supported",
- minimum: "2.2.0",
- }
- }
-
var attribute C.uint32_t
retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
@@ -241,13 +236,6 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
return errBadFilter
}
- if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
- return VersionError{
- message: "thread synchronization attribute is not supported",
- minimum: "2.2.0",
- }
- }
-
retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
if retCode != 0 {
return syscall.Errno(-1 * retCode)
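Both attribute paths previously special-cased filterAttrTsync because thread synchronization only exists in libseccomp 2.2.0 and later; with ensureSupportedVersion now enforcing that same floor, the per-call gates became dead code and are dropped. A usage sketch against this package's exported API (NewFilter, Release, SetTsync), assuming it is imported as github.com/seccomp/libseccomp-golang:

    package main

    import (
    	"log"

    	seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
    	filter, err := seccomp.NewFilter(seccomp.ActAllow)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer filter.Release()

    	// With the 2.2.0 floor, no version check is needed before
    	// touching the thread-synchronization attribute.
    	if err := filter.SetTsync(true); err != nil {
    		log.Fatal(err)
    	}
    }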
@@ -259,12 +247,9 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
// DOES NOT LOCK OR CHECK VALIDITY
// Assumes caller has already done this
// Wrapper for seccomp_rule_add_... functions
-func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
- var length C.uint
- if cond != nil {
- length = 1
- } else {
- length = 0
+func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, length C.uint, cond C.scmp_cast_t) error {
+ if length != 0 && cond == nil {
+ return fmt.Errorf("null conditions list, but length is nonzero")
}
var retCode C.int
@@ -275,9 +260,11 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b
}
if syscall.Errno(-1*retCode) == syscall.EFAULT {
- return fmt.Errorf("unrecognized syscall")
+ return fmt.Errorf("unrecognized syscall %#x", int32(call))
} else if syscall.Errno(-1*retCode) == syscall.EPERM {
return fmt.Errorf("requested action matches default action of filter")
+ } else if syscall.Errno(-1*retCode) == syscall.EINVAL {
+ return fmt.Errorf("two checks on the same syscall argument")
} else if retCode != 0 {
return syscall.Errno(-1 * retCode)
}
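libseccomp follows the kernel convention of returning a negated errno, which the wrapper translates into descriptive errors; the new EINVAL branch covers seccomp_rule_add_array rejecting two comparisons against the same syscall argument. A minimal illustration of the negated-errno translation:

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	retCode := -22 // what a libseccomp call returns on EINVAL
    	err := syscall.Errno(-1 * retCode)
    	fmt.Println(err == syscall.EINVAL) // true
    	fmt.Println(err)                   // "invalid argument"
    }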
@@ -295,7 +282,7 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
}
if len(conds) == 0 {
- if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
+ if err := f.addRuleWrapper(call, action, exact, 0, nil); err != nil {
return err
}
} else {
@@ -307,13 +294,20 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
}
}
- for _, cond := range conds {
- cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
- defer C.free(cmpStruct)
+ argsArr := C.make_arg_cmp_array(C.uint(len(conds)))
+ if argsArr == nil {
+ return fmt.Errorf("error allocating memory for conditions")
+ }
+ defer C.free(argsArr)
- if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
- return err
- }
+ for i, cond := range conds {
+ C.add_struct_arg_cmp(C.scmp_cast_t(argsArr), C.uint(i),
+ C.uint(cond.Argument), cond.Op.toNative(),
+ C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
+ }
+
+ if err := f.addRuleWrapper(call, action, exact, C.uint(len(conds)), C.scmp_cast_t(argsArr)); err != nil {
+ return err
}
}
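Since every condition now lands in one seccomp_rule_add_array call, a multi-condition rule matches only when all of its comparisons hold for the same syscall invocation, instead of being split into one rule per condition as the removed loop did. A usage sketch with the exported API; the two conditions below target different arguments, avoiding the EINVAL case noted earlier:

    package main

    import (
    	"log"

    	seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
    	filter, err := seccomp.NewFilter(seccomp.ActAllow)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer filter.Release()

    	write, err := seccomp.GetSyscallFromName("write")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// fd == 2 ...
    	condFd, err := seccomp.MakeCondition(0, seccomp.CompareEqual, 2)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// ... and count > 1024, in the same write(2) call.
    	condLen, err := seccomp.MakeCondition(2, seccomp.CompareGreater, 1024)
    	if err != nil {
    		log.Fatal(err)
    	}

    	if err := filter.AddRuleConditional(write, seccomp.ActErrno,
    		[]seccomp.ScmpCondition{condFd, condLen}); err != nil {
    		log.Fatal(err)
    	}
    }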
@@ -325,11 +319,11 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b
// Helper - Sanitize Arch token input
func sanitizeArch(in ScmpArch) error {
if in < archStart || in > archEnd {
- return fmt.Errorf("unrecognized architecture")
+ return fmt.Errorf("unrecognized architecture %#x", uint(in))
}
if in.toNative() == C.C_ARCH_BAD {
- return fmt.Errorf("architecture is not supported on this version of the library")
+ return fmt.Errorf("architecture %v is not supported on this version of the library", in)
}
return nil
@@ -338,7 +332,7 @@ func sanitizeArch(in ScmpArch) error {
func sanitizeAction(in ScmpAction) error {
inTmp := in & 0x0000FFFF
if inTmp < actionStart || inTmp > actionEnd {
- return fmt.Errorf("unrecognized action")
+ return fmt.Errorf("unrecognized action %#x", uint(inTmp))
}
if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
@@ -350,7 +344,7 @@ func sanitizeAction(in ScmpAction) error {
func sanitizeCompareOp(in ScmpCompareOp) error {
if in < compareOpStart || in > compareOpEnd {
- return fmt.Errorf("unrecognized comparison operator")
+ return fmt.Errorf("unrecognized comparison operator %#x", uint(in))
}
return nil
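Each sanitize helper now embeds the rejected value in its error, formatted with %#x so it prints as prefixed hex and bad enum values can be reported without a debugger. For illustration:

    package main

    import "fmt"

    func main() {
    	bad := uint(0xdead)
    	fmt.Println(fmt.Errorf("unrecognized action %#x", bad))
    	// Output: unrecognized action 0xdead
    }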
@@ -393,7 +387,7 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) {
case C.C_ARCH_S390X:
return ArchS390X, nil
default:
- return 0x0, fmt.Errorf("unrecognized architecture")
+ return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a))
}
}
@@ -475,7 +469,7 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
case C.C_ACT_ALLOW:
return ActAllow, nil
default:
- return 0x0, fmt.Errorf("unrecognized action")
+ return 0x0, fmt.Errorf("unrecognized action %#x", uint32(a))
}
}
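archFromNative and actionFromNative get the same treatment: the default branch now reports the raw native value instead of a bare "unrecognized" message. Callers who want the resolved library version can read the triple captured above through the package's accessor; a sketch assuming the GetLibraryVersion function exported by current releases of this package:

    package main

    import (
    	"fmt"

    	seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
    	major, minor, micro := seccomp.GetLibraryVersion()
    	fmt.Printf("libseccomp %d.%d.%d\n", major, minor, micro)
    }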