-rwxr-xr-x  .tool/lint | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  cmd/podman/build.go | 15
-rw-r--r--  cmd/podman/shared/funcs.go | 8
-rw-r--r--  cmd/podman/shared/funcs_test.go | 9
-rw-r--r--  contrib/python/pypodman/pypodman/lib/actions/__init__.py | 2
-rw-r--r--  contrib/python/pypodman/pypodman/lib/actions/version_action.py | 41
-rw-r--r--  libpod/container_graph_test.go | 4
-rw-r--r--  libpod/oci.go | 5
-rw-r--r--  libpod/oci_linux.go | 10
-rw-r--r--  pkg/varlinkapi/images.go | 3
-rw-r--r--  vendor.conf | 3
-rw-r--r--  vendor/github.com/containers/buildah/README.md | 1
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 28
-rw-r--r--  vendor/github.com/containers/buildah/delete.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 98
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go | 133
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 81
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/run.go | 180
-rw-r--r--  vendor/github.com/containers/buildah/unshare/unshare.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 61
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf | 6
-rw-r--r--  vendor/github.com/google/shlex/COPYING | 202
-rw-r--r--  vendor/github.com/google/shlex/README | 2
-rw-r--r--  vendor/github.com/google/shlex/shlex.go | 416
30 files changed, 1123 insertions, 213 deletions
diff --git a/.tool/lint b/.tool/lint
index b7006c8fd..f7bf81c1d 100755
--- a/.tool/lint
+++ b/.tool/lint
@@ -40,6 +40,8 @@ ${LINTER} \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$'\
--exclude='duplicate of.*_test.go.*\(dupl\)$'\
--exclude='cmd\/client\/.*\.go.*\(dupl\)$'\
+ --exclude='libpod\/.*_easyjson.go:.*'\
+ --exclude='.* other occurrence\(s\) of "(container|host|tmpfs|unknown)" found in: .*\(goconst\)$'\
--exclude='vendor\/.*'\
--exclude='podman\/.*'\
--exclude='server\/seccomp\/.*\.go.*$'\
diff --git a/Makefile b/Makefile
index 2511ffcd7..195d105b7 100644
--- a/Makefile
+++ b/Makefile
@@ -293,7 +293,7 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man .ins
if [ ! -x "$(GOBIN)/gometalinter" ]; then \
$(GO) get -u github.com/alecthomas/gometalinter; \
cd $(FIRST_GOPATH)/src/github.com/alecthomas/gometalinter; \
- git checkout 23261fa046586808612c61da7a81d75a658e0814; \
+ git checkout e8d801238da6f0dfd14078d68f9b53fa50a7eeb5; \
$(GO) install github.com/alecthomas/gometalinter; \
$(GOBIN)/gometalinter --install; \
fi
diff --git a/cmd/podman/build.go b/cmd/podman/build.go
index 14bf226f9..880cb892f 100644
--- a/cmd/podman/build.go
+++ b/cmd/podman/build.go
@@ -1,6 +1,11 @@
package main
import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
buildahcli "github.com/containers/buildah/pkg/cli"
@@ -10,15 +15,15 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
)
var (
layerFlags = []cli.Flag{
cli.BoolTFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful. (default true)",
+ },
+ cli.BoolTFlag{
Name: "layers",
Usage: "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. ",
},
@@ -230,7 +235,7 @@ func buildCmd(c *cli.Context) error {
Layers: layers,
NoCache: c.Bool("no-cache"),
RemoveIntermediateCtrs: c.BoolT("rm"),
- ForceRmIntermediateCtrs: c.Bool("force-rm"),
+ ForceRmIntermediateCtrs: c.BoolT("force-rm"),
}
if c.Bool("quiet") {
diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go
index a92e0d547..8520c0616 100644
--- a/cmd/podman/shared/funcs.go
+++ b/cmd/podman/shared/funcs.go
@@ -5,6 +5,8 @@ import (
"os"
"path/filepath"
"strings"
+
+ "github.com/google/shlex"
)
func substituteCommand(cmd string) (string, error) {
@@ -42,7 +44,11 @@ func GenerateCommand(command, imageName, name string) ([]string, error) {
if name == "" {
name = imageName
}
- cmd := strings.Split(command, " ")
+
+ cmd, err := shlex.Split(command)
+ if err != nil {
+ return nil, err
+ }
prog, err := substituteCommand(cmd[0])
if err != nil {
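
A minimal sketch, not part of the patch, showing what the switch from strings.Split to google/shlex buys GenerateCommand: shell-style tokenizing keeps a quoted argument such as "hello world" as a single element instead of splitting it on the space.

package main

import (
	"fmt"
	"strings"

	"github.com/google/shlex"
)

func main() {
	command := `docker run IMAGE echo "hello world"`

	// Naive split: 6 tokens, and the quote characters are kept literally.
	naive := strings.Split(command, " ")
	fmt.Println(len(naive), naive)

	// Shell-style split: 5 tokens, "hello world" stays one argument.
	tokens, err := shlex.Split(command)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(tokens), tokens)
}
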
diff --git a/cmd/podman/shared/funcs_test.go b/cmd/podman/shared/funcs_test.go
index 596df84e8..7506b9d9c 100644
--- a/cmd/podman/shared/funcs_test.go
+++ b/cmd/podman/shared/funcs_test.go
@@ -18,10 +18,11 @@ var (
)
func TestGenerateCommand(t *testing.T) {
- inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo install"
- correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo install"
+ inputCommand := "docker run -it --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE echo \"hello world\""
+ correctCommand := "/proc/self/exe run -it --name bar -e NAME=bar -e IMAGE=foo foo echo hello world"
newCommand, err := GenerateCommand(inputCommand, "foo", "bar")
assert.Nil(t, err)
+ assert.Equal(t, "hello world", newCommand[11])
assert.Equal(t, correctCommand, strings.Join(newCommand, " "))
}
@@ -108,8 +109,8 @@ func TestGenerateCommandNoSetName(t *testing.T) {
}
func TestGenerateCommandNoName(t *testing.T) {
- inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install"
- correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install"
+ inputCommand := "docker run -it -e IMAGE=IMAGE IMAGE echo install"
+ correctCommand := "/proc/self/exe run -it -e IMAGE=foo foo echo install"
newCommand, err := GenerateCommand(inputCommand, "foo", "")
assert.Nil(t, err)
assert.Equal(t, correctCommand, strings.Join(newCommand, " "))
diff --git a/contrib/python/pypodman/pypodman/lib/actions/__init__.py b/contrib/python/pypodman/pypodman/lib/actions/__init__.py
index 2668cd8ff..bc863ce6d 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/__init__.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/__init__.py
@@ -22,6 +22,7 @@ from pypodman.lib.actions.rm_action import Rm
from pypodman.lib.actions.rmi_action import Rmi
from pypodman.lib.actions.run_action import Run
from pypodman.lib.actions.search_action import Search
+from pypodman.lib.actions.version_action import Version
__all__ = [
'Attach',
@@ -47,4 +48,5 @@ __all__ = [
'Rmi',
'Run',
'Search',
+ 'Version',
]
diff --git a/contrib/python/pypodman/pypodman/lib/actions/version_action.py b/contrib/python/pypodman/pypodman/lib/actions/version_action.py
new file mode 100644
index 000000000..12b6dc576
--- /dev/null
+++ b/contrib/python/pypodman/pypodman/lib/actions/version_action.py
@@ -0,0 +1,41 @@
+"""Remote client command for reporting on Podman service."""
+import json
+import sys
+
+import podman
+import yaml
+from pypodman.lib import AbstractActionBase
+
+
+class Version(AbstractActionBase):
+ """Class for reporting on Podman Service."""
+
+ @classmethod
+ def subparser(cls, parent):
+ """Add Version command to parent parser."""
+ parser = parent.add_parser(
+ 'version', help='report version on podman service')
+ parser.set_defaults(class_=cls, method='version')
+
+ def __init__(self, args):
+ """Construct Version class."""
+ super().__init__(args)
+
+ def version(self):
+ """Report on Podman Service."""
+ try:
+ info = self.client.system.info()
+ except podman.ErrorOccurred as e:
+ sys.stdout.flush()
+ print(
+ '{}'.format(e.reason).capitalize(),
+ file=sys.stderr,
+ flush=True)
+ return 1
+ else:
+ version = info._asdict()['podman']
+ host = info._asdict()['host']
+ print("Version {}".format(version['podman_version']))
+ print("Go Version {}".format(version['go_version']))
+ print("Git Commit {}".format(version['git_commit']))
+ print("OS/Arch {}/{}".format(host["os"], host["arch"]))
diff --git a/libpod/container_graph_test.go b/libpod/container_graph_test.go
index bba3d7aad..25461f1f4 100644
--- a/libpod/container_graph_test.go
+++ b/libpod/container_graph_test.go
@@ -205,6 +205,7 @@ func TestBuildContainerGraphFourContainersNoEdges(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
graph, err := buildContainerGraph([]*Container{ctr1, ctr2, ctr3, ctr4})
assert.NoError(t, err)
@@ -241,6 +242,7 @@ func TestBuildContainerGraphFourContainersTwoInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr1.config.ID
@@ -260,6 +262,7 @@ func TestBuildContainerGraphFourContainersAllInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
ctr3.config.NetNsCtr = ctr4.config.ID
@@ -281,6 +284,7 @@ func TestBuildContainerGraphFourContainersNoneInCycle(t *testing.T) {
ctr3, err := getTestCtrN("3", tmpDir)
assert.NoError(t, err)
ctr4, err := getTestCtrN("4", tmpDir)
+ assert.NoError(t, err)
ctr1.config.IPCNsCtr = ctr2.config.ID
ctr1.config.NetNsCtr = ctr3.config.ID
ctr2.config.UserNsCtr = ctr3.config.ID
diff --git a/libpod/oci.go b/libpod/oci.go
index 233bacfbb..71da830b5 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -350,7 +350,8 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
// Set the label of the conmon process to be level :s0
// This will allow the container processes to talk to fifo-files
// passed into the container by conmon
- plabel, err := selinux.CurrentLabel()
+ var plabel string
+ plabel, err = selinux.CurrentLabel()
if err != nil {
childPipe.Close()
return errors.Wrapf(err, "Failed to get current SELinux label")
@@ -360,7 +361,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
runtime.LockOSThread()
if c["level"] != "s0" && c["level"] != "" {
c["level"] = "s0"
- if err := label.SetProcessLabel(c.Get()); err != nil {
+ if err = label.SetProcessLabel(c.Get()); err != nil {
runtime.UnlockOSThread()
return err
}
diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go
index e6b7cbe4f..b159eae78 100644
--- a/libpod/oci_linux.go
+++ b/libpod/oci_linux.go
@@ -19,6 +19,8 @@ import (
"golang.org/x/sys/unix"
)
+const unknownPackage = "Unknown"
+
func (r *OCIRuntime) moveConmonToCgroup(ctr *Container, cgroupParent string, cmd *exec.Cmd) error {
if os.Geteuid() == 0 {
if r.cgroupManager == SystemdCgroupsManager {
@@ -112,7 +114,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
}
func rpmVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/rpm", "-q", "-f", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -121,7 +123,7 @@ func rpmVersion(path string) string {
}
func dpkgVersion(path string) string {
- output := "Unknown"
+ output := unknownPackage
cmd := exec.Command("/usr/bin/dpkg", "-S", path)
if outp, err := cmd.Output(); err == nil {
output = string(outp)
@@ -130,14 +132,14 @@ func dpkgVersion(path string) string {
}
func (r *OCIRuntime) pathPackage() string {
- if out := rpmVersion(r.path); out != "Unknown" {
+ if out := rpmVersion(r.path); out != unknownPackage {
return out
}
return dpkgVersion(r.path)
}
func (r *OCIRuntime) conmonPackage() string {
- if out := rpmVersion(r.conmonPath); out != "Unknown" {
+ if out := rpmVersion(r.conmonPath); out != unknownPackage {
return out
}
return dpkgVersion(r.conmonPath)
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index d14c61c39..42e285b53 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -271,6 +271,9 @@ func (i *LibpodAPI) InspectImage(call iopodman.VarlinkCall, name string) error {
return call.ReplyImageNotFound(name)
}
inspectInfo, err := newImage.Inspect(getContext())
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
b, err := json.Marshal(inspectInfo)
if err != nil {
return call.ReplyErrorOccurred(fmt.Sprintf("unable to serialize"))
diff --git a/vendor.conf b/vendor.conf
index 9dda16e80..c8e968648 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -92,10 +92,11 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master
-github.com/containers/buildah 46c577c87d5a7ab30ef40cfa695cd2b96b32b117
+github.com/containers/buildah 795d43e60e5a1ab283981b79eeda1dd14a65a0bd
github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master
github.com/ulikunitz/xz v0.5.4
github.com/mailru/easyjson 03f2033d19d5860aef995fe360ac7d395cd8ce65
github.com/coreos/go-iptables 25d087f3cffd9aedc0c2b7eff25f23cbf3c20fe1
+github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 6a79e524b..2b539bba8 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -107,6 +107,7 @@ $ sudo ./lighttpd.sh
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 9994d6cd0..1a642ed3d 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -224,6 +224,7 @@ func GetBuildInfo(b *Builder) BuilderInfo {
ContainerID: b.ContainerID,
MountPoint: b.MountPoint,
ProcessLabel: b.ProcessLabel,
+ MountLabel: b.MountLabel,
ImageAnnotations: b.ImageAnnotations,
ImageCreatedBy: b.ImageCreatedBy,
OCIv1: b.OCIv1,
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 51e2d2bd4..8cfefb3de 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -1147,7 +1147,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags")
+ return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
}
}
}
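
A minimal sketch, not part of the patch, of the class of fix applied here (and again in imagebuildah/build.go and run.go below): errors.Wrapf formats with fmt, so a %q verb without a matching argument renders as %!q(MISSING) in the error text, and go vet's printf check flags it. The fix is simply to supply the missing argument.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	err := fmt.Errorf("permission denied")
	target := "/sys"

	// Missing argument: the message contains "%!q(MISSING)".
	bad := errors.Wrapf(err, "error remounting %q in mount namespace with expected flags")
	fmt.Println(bad)

	// Fixed: the target path is interpolated into the message.
	good := errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
	fmt.Println(good)
}
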
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 56a901925..be59215df 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -2,12 +2,14 @@ package buildah
import (
"io"
-
- "github.com/sirupsen/logrus"
+ "os"
+ "path/filepath"
cp "github.com/containers/image/copy"
"github.com/containers/image/transports"
"github.com/containers/image/types"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/sirupsen/logrus"
)
const (
@@ -17,10 +19,20 @@ const (
DOCKER = "docker"
)
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
+
func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
sourceCtx := &types.SystemContext{}
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sourceCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
}
sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx)
if err != nil {
@@ -33,6 +45,12 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
destinationCtx := &types.SystemContext{}
if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext
+ } else {
+ if rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ destinationCtx.SystemRegistriesConfPath = userRegistriesFile
+ }
+ }
}
destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx)
if err != nil {
@@ -58,5 +76,11 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath
}
+ if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() {
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sc.SystemRegistriesConfPath = userRegistriesFile
+ }
+
+ }
return sc
}
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index 25f76cf74..e3bddba20 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -1,7 +1,6 @@
package buildah
import (
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -14,5 +13,5 @@ func (b *Builder) Delete() error {
b.MountPoint = ""
b.Container = ""
b.ContainerID = ""
- return label.ReleaseLabel(b.ProcessLabel)
+ return nil
}
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 31aff9eea..c0bf90ddd 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -107,7 +107,6 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
// compression that we'll be applying.
func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
- //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available
dmediaType = docker.V2S2MediaTypeUncompressedLayer
if i.compression != archive.Uncompressed {
switch i.compression {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 41d85cbc6..292ff9541 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -563,39 +563,39 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
registry: options.Registry,
transport: options.Transport,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
- quiet: options.Quiet,
- runtime: options.Runtime,
- runtimeArgs: options.RuntimeArgs,
- transientMounts: options.TransientMounts,
- compression: options.Compression,
- output: options.Output,
- outputFormat: options.OutputFormat,
- additionalTags: options.AdditionalTags,
- signaturePolicyPath: options.SignaturePolicyPath,
- systemContext: options.SystemContext,
- volumeCache: make(map[string]string),
- volumeCacheInfo: make(map[string]os.FileInfo),
- log: options.Log,
- in: options.In,
- out: options.Out,
- err: options.Err,
- reportWriter: options.ReportWriter,
- isolation: options.Isolation,
- namespaceOptions: options.NamespaceOptions,
- configureNetwork: options.ConfigureNetwork,
- cniPluginPath: options.CNIPluginPath,
- cniConfigDir: options.CNIConfigDir,
- idmappingOptions: options.IDMappingOptions,
- commonBuildOptions: options.CommonBuildOpts,
- defaultMountsFilePath: options.DefaultMountsFilePath,
- iidfile: options.IIDFile,
- squash: options.Squash,
- labels: append([]string{}, options.Labels...),
- annotations: append([]string{}, options.Annotations...),
- layers: options.Layers,
- noCache: options.NoCache,
- removeIntermediateCtrs: options.RemoveIntermediateCtrs,
- forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ quiet: options.Quiet,
+ runtime: options.Runtime,
+ runtimeArgs: options.RuntimeArgs,
+ transientMounts: options.TransientMounts,
+ compression: options.Compression,
+ output: options.Output,
+ outputFormat: options.OutputFormat,
+ additionalTags: options.AdditionalTags,
+ signaturePolicyPath: options.SignaturePolicyPath,
+ systemContext: options.SystemContext,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ log: options.Log,
+ in: options.In,
+ out: options.Out,
+ err: options.Err,
+ reportWriter: options.ReportWriter,
+ isolation: options.Isolation,
+ namespaceOptions: options.NamespaceOptions,
+ configureNetwork: options.ConfigureNetwork,
+ cniPluginPath: options.CNIPluginPath,
+ cniConfigDir: options.CNIConfigDir,
+ idmappingOptions: options.IDMappingOptions,
+ commonBuildOptions: options.CommonBuildOpts,
+ defaultMountsFilePath: options.DefaultMountsFilePath,
+ iidfile: options.IIDFile,
+ squash: options.Squash,
+ labels: append([]string{}, options.Labels...),
+ annotations: append([]string{}, options.Annotations...),
+ layers: options.Layers,
+ noCache: options.NoCache,
+ removeIntermediateCtrs: options.RemoveIntermediateCtrs,
+ forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -764,7 +764,7 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
if err != nil {
candidates, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q: %v", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
}
if len(candidates) == 0 {
return nil, errors.Errorf("error parsing target image name %q", b.output)
@@ -1044,17 +1044,14 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (
}
continue
}
- // For local files, walk the file tree and check the time stamps.
- timeIsGreater := false
- err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
- if info.ModTime().After(*historyTime) {
- timeIsGreater = true
- return nil
- }
- return nil
- })
+ // Walks the file tree for local files and uses chroot to ensure we don't escape out of the allowed path
+ // when resolving any symlinks.
+ // Change the time format to ensure we don't run into a parsing error when converting again from string
+ // to time.Time. It is a known Go issue that the conversions cause errors sometimes, so specifying a particular
+ // time format here when converting to a string.
+ timeIsGreater, err := resolveModifiedTime(b.contextDir, item, historyTime.Format(time.RFC3339Nano))
if err != nil {
- return false, errors.Wrapf(err, "error walking file tree %q", item)
+ return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
}
if timeIsGreater {
return false, nil
@@ -1289,15 +1286,24 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} else {
// If the Dockerfile isn't found try prepending the
// context directory to it.
- if _, err := os.Stat(dfile); os.IsNotExist(err) {
+ dinfo, err := os.Stat(dfile)
+ if os.IsNotExist(err) {
dfile = filepath.Join(options.ContextDirectory, dfile)
}
+ dinfo, err = os.Stat(dfile)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ }
+ // If given a directory, add '/Dockerfile' to it.
+ if dinfo.Mode().IsDir() {
+ dfile = filepath.Join(dfile, "Dockerfile")
+ }
logrus.Debugf("reading local Dockerfile %q", dfile)
contents, err := os.Open(dfile)
if err != nil {
return "", nil, errors.Wrapf(err, "error reading %q", dfile)
}
- dinfo, err := contents.Stat()
+ dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
index 20e396f1f..2269b8dcc 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"strings"
+ "time"
"github.com/containers/storage/pkg/reexec"
"github.com/pkg/errors"
@@ -14,13 +15,18 @@ import (
const (
symlinkChrootedCommand = "chrootsymlinks-resolve"
+ symlinkModifiedTime = "modtimesymlinks-resolve"
maxSymlinksResolved = 40
)
func init() {
reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+ reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
}
+// main() for grandparent subprocess. Its main job is to shuttle stdio back
+// and forth, managing a pseudo-terminal if we want one, for our child, the
+// parent subprocess.
func resolveChrootedSymlinks() {
status := 0
flag.Parse()
@@ -39,7 +45,7 @@ func resolveChrootedSymlinks() {
}
// Our second parameter is the path name to evaluate for symbolic links
- symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+ symLink, err := getSymbolicLink(flag.Arg(1))
if err != nil {
fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
os.Exit(1)
@@ -51,7 +57,8 @@ func resolveChrootedSymlinks() {
os.Exit(status)
}
-// ResolveSymlink resolves any symlink in filename in the context of rootdir.
+// ResolveSymLink (in the grandparent process) resolves any symlink in filename
+// in the context of rootdir.
func ResolveSymLink(rootdir, filename string) (string, error) {
// The child process expects a chroot and one path that
// will be consulted relative to the chroot directory and evaluated
@@ -62,32 +69,121 @@ func ResolveSymLink(rootdir, filename string) (string, error) {
return "", errors.Wrapf(err, string(output))
}
- // Hand back the resolved symlink, will be "" if a symlink is not found
+ // Hand back the resolved symlink, will be filename if a symlink is not found
return string(output), nil
}
+// main() for grandparent subprocess. Its main job is to shuttle stdio back
+// and forth, managing a pseudo-terminal if we want one, for our child, the
+// parent subprocess.
+func resolveSymlinkTimeModified() {
+ status := 0
+ flag.Parse()
+ if len(flag.Args()) < 1 {
+ os.Exit(1)
+ }
+ // Our first parameter is the directory to chroot into.
+ if err := unix.Chdir(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+ os.Exit(1)
+ }
+ if err := unix.Chroot(flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+ os.Exit(1)
+ }
+
+ // Our second parameter is the path name to evaluate for symbolic links.
+ // Our third parameter is the time the cached intermediate image was created.
+ // We check whether the modified time of the filepath we provide is after the time the cached image was created.
+ timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err)
+ os.Exit(1)
+ }
+ if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+ os.Exit(1)
+ }
+ os.Exit(status)
+}
+
+// resolveModifiedTime (in the grandparent process) checks filename for any symlinks,
+// resolves it and compares the modified time of the file with historyTime, which is
+// the creation time of the cached image. It returns true if filename was modified after
+// historyTime, otherwise returns false.
+func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
+ // The child process expects a chroot and one path that
+ // will be consulted relative to the chroot directory and evaluated
+ // for any symbolic links present.
+ cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return false, errors.Wrapf(err, string(output))
+ }
+ // Hand back true/false depending on in the file was modified after the caches image was created.
+ return string(output) == "true", nil
+}
+
+// modTimeIsGreater goes through the files added/copied in using the Dockerfile and
+// checks the time stamp (follows symlinks) with the time stamp of when the cached
+// image was created. IT compares the two and returns true if the file was modified
+// after the cached image was created, otherwise it returns false.
+func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
+ var timeIsGreater bool
+
+ // Convert historyTime from string to time.Time for comparison
+ histTime, err := time.Parse(time.RFC3339Nano, historyTime)
+ if err != nil {
+ return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
+ }
+ // Walk the file tree and check the time stamps.
+ // Since we are chroot in rootdir, only want the path of the actual filename, i.e path - rootdir.
+ // +1 to account for the extra "/" (e.g rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json)
+ err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
+ modTime := info.ModTime()
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ // Evaluate any symlink that occurs to get updated modified information
+ resolvedPath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ return errors.Wrapf(err, "error evaluating symlink %q", path)
+ }
+ fileInfo, err := os.Stat(resolvedPath)
+ if err != nil {
+ return errors.Wrapf(err, "error getting file info %q", resolvedPath)
+ }
+ modTime = fileInfo.ModTime()
+ }
+ if modTime.After(histTime) {
+ timeIsGreater = true
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return false, errors.Wrapf(err, "error walking file tree %q", path)
+ }
+ return timeIsGreater, err
+}
+
// getSymbolic link goes through each part of the path and continues resolving symlinks as they appear.
// Returns what the whole target path for what "path" resolves to.
-func getSymbolicLink(rootdir, path string) (string, error) {
+func getSymbolicLink(path string) (string, error) {
var (
symPath string
symLinksResolved int
)
-
- // Splitting path as we need to resolve each parth of the path at a time
+ // Splitting path as we need to resolve each part of the path at a time
splitPath := strings.Split(path, "/")
if splitPath[0] == "" {
splitPath = splitPath[1:]
symPath = "/"
}
-
for _, p := range splitPath {
// If we have resolved 40 symlinks, that means something is terribly wrong
// will return an error and exit
if symLinksResolved >= maxSymlinksResolved {
return "", errors.Errorf("have resolved %q symlinks, something is terribly wrong!", maxSymlinksResolved)
}
-
symPath = filepath.Join(symPath, p)
isSymlink, resolvedPath, err := hasSymlink(symPath)
if err != nil {
@@ -119,16 +215,21 @@ func getSymbolicLink(rootdir, path string) (string, error) {
// otherwise it returns false and path
func hasSymlink(path string) (bool, string, error) {
info, err := os.Lstat(path)
- if os.IsNotExist(err) {
- if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
- }
- info, err = os.Lstat(path)
- if err != nil {
- return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(path, 0755); err != nil {
+ return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ }
+ info, err = os.Lstat(path)
+ if err != nil {
+ return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ }
+ } else {
+ return false, path, errors.Wrapf(err, "error get stat of path %q", path)
}
}
- // Return false and path as path is not a symlink
+
+ // Return false and path as path if not a symlink
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
return false, path, nil
}
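
A minimal sketch, not part of the patch, of the reexec pattern the new modtimesymlinks-resolve command follows: a handler is registered under a command name in init(), reexec.Init() dispatches to that handler when the process has been re-executed under the registered name, and the parent launches the child with reexec.Command and reads its output. The handler name and arguments below are illustrative only; the real handler chroots into the build context before walking files.

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Child entry point, selected by the name passed to reexec.Command.
	reexec.Register("print-args", func() {
		fmt.Println(os.Args[1:])
		os.Exit(0)
	})
}

func main() {
	// In the re-executed child, Init runs the registered handler and returns true.
	if reexec.Init() {
		return
	}
	// Parent: re-execute ourselves as the "print-args" command.
	cmd := reexec.Command("print-args", "/some/rootdir", "/some/file")
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(string(out))
}
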
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 8b0e774ba..13bebf420 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -3,6 +3,7 @@ package buildah
import (
"context"
"fmt"
+ "math/rand"
"strings"
"github.com/containers/buildah/util"
@@ -12,7 +13,6 @@ import (
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/storage"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -23,11 +23,6 @@ const (
// as "no image".
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
- // DefaultTransport is a prefix that we apply to an image name if we
- // can't find one in the local Store, in order to generate a source
- // reference for the image that we can then copy to the local Store.
- DefaultTransport = "docker://"
-
// minimumTruncatedIDLength is the minimum length of an identifier that
// we'll accept as possibly being a truncated image ID.
minimumTruncatedIDLength = 3
@@ -150,7 +145,7 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
logrus.Debugf("error parsing image name %q as given, trying with transport %q: %v", image, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
srcRef2, err := alltransports.ParseImageName(transport + image)
@@ -232,6 +227,27 @@ func resolveImage(ctx context.Context, systemContext *types.SystemContext, store
}
}
+func containerNameExist(name string, containers []storage.Container) bool {
+ for _, container := range containers {
+ for _, cname := range container.Names {
+ if cname == name {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func findUnusedContainer(name string, containers []storage.Container) string {
+ suffix := 1
+ tmpName := name
+ for containerNameExist(tmpName, containers) {
+ tmpName = fmt.Sprintf("%s-%d", name, suffix)
+ suffix++
+ }
+ return tmpName
+}
+
func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
var ref types.ImageReference
var img *storage.Image
@@ -241,7 +257,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
@@ -277,23 +293,33 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
name = imageNamePrefix(image) + "-" + name
}
}
+ var container *storage.Container
+ tmpName := name
+ if options.Container == "" {
+ containers, err := store.Containers()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to check for container names")
+ }
+ tmpName = findUnusedContainer(tmpName, containers)
+ }
- coptions := storage.ContainerOptions{}
- coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions)
-
- container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
- suffix := 1
- for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" {
- suffix++
- tmpName := fmt.Sprintf("%s-%d", name, suffix)
- if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil {
+ conflict := 100
+ for true {
+ coptions := storage.ContainerOptions{
+ LabelOpts: options.CommonBuildOpts.LabelOpts,
+ IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
+ }
+ container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
+ if err == nil {
name = tmpName
+ break
}
+ if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
+ return nil, errors.Wrapf(err, "error creating container")
+ }
+ tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
+ conflict = conflict * 10
}
- if err != nil {
- return nil, errors.Wrapf(err, "error creating container")
- }
-
defer func() {
if err != nil {
if err2 := store.DeleteContainer(container.ID); err2 != nil {
@@ -302,13 +328,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
}()
- if err = ReserveSELinuxLabels(store, container.ID); err != nil {
- return nil, err
- }
- processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
- if err != nil {
- return nil, err
- }
uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
defaultNamespaceOptions, err := DefaultNamespaceOptions()
@@ -328,8 +347,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
ContainerID: container.ID,
ImageAnnotations: map[string]string{},
ImageCreatedBy: "",
- ProcessLabel: processLabel,
- MountLabel: mountLabel,
+ ProcessLabel: container.ProcessLabel(),
+ MountLabel: container.MountLabel(),
DefaultMountsFilePath: options.DefaultMountsFilePath,
Isolation: options.Isolation,
NamespaceOptions: namespaceOptions,
@@ -351,7 +370,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
if options.Mount {
- _, err = builder.Mount(mountLabel)
+ _, err = builder.Mount(container.MountLabel())
if err != nil {
return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index b54663f5d..03b340294 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -71,6 +71,10 @@ var (
LayerFlags = []cli.Flag{
cli.BoolFlag{
+ Name: "force-rm",
+ Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
+ },
+ cli.BoolFlag{
Name: "layers",
Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
},
@@ -115,10 +119,6 @@ var (
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
},
- cli.BoolFlag{
- Name: "force-rm",
- Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
- },
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 52269541a..1a51edb0e 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -146,11 +146,11 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
srcRef, err := alltransports.ParseImageName(spec)
if err != nil {
if options.Transport == "" {
- options.Transport = DefaultTransport
+ options.Transport = util.DefaultTransport
}
logrus.Debugf("error parsing image name %q, trying with transport %q: %v", spec, options.Transport, err)
transport := options.Transport
- if transport != DefaultTransport {
+ if transport != util.DefaultTransport {
transport = transport + ":"
}
spec = transport + spec
@@ -201,6 +201,7 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
logrus.Debugf("copying %q to %q", spec, destName)
if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil {
+ logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err)
return nil, err
}
return destRef, nil
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 718ef4e36..636a204b3 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -451,7 +451,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil)
- builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes)
+ builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -493,15 +493,21 @@ func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts
return mounts, nil
}
-func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) {
+func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
+ hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range builtinVolumes {
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(containerDir, "buildah-volumes", subdir)
+ srcPath := filepath.Join(mountPoint, volume)
+ initializeVolume := false
// If we need to, initialize the volume path's initial contents.
- if _, err := os.Stat(volumePath); err != nil && os.IsNotExist(err) {
+ if _, err := os.Stat(volumePath); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume)
+ }
logrus.Debugf("setting up built-in volume at %q", volumePath)
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume)
@@ -509,11 +515,21 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume)
}
- srcPath := filepath.Join(mountPoint, volume)
- stat, err := os.Stat(srcPath)
- if err != nil {
+ initializeVolume = true
+ }
+ stat, err := os.Stat(srcPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
}
+ if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
+ return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume)
+ }
+ if stat, err = os.Stat(srcPath); err != nil {
+ return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ }
+ }
+ if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume)
}
@@ -1044,24 +1060,31 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
- hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
- if err != nil {
- return err
- }
- resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
- if err != nil {
- return err
- }
+ bindFiles := make(map[string]string)
+ namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
+ volumes := b.Volumes()
- if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
- return err
+ if !contains(volumes, "/etc/hosts") {
+ hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/hosts"] = hostFile
+
+ if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
+ return err
+ }
}
- bindFiles := map[string]string{
- "/etc/hosts": hostFile,
- "/etc/resolv.conf": resolvFile,
+ if !contains(volumes, "/etc/resolv.conf") {
+ resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
+ if err != nil {
+ return err
+ }
+ bindFiles["/etc/resolv.conf"] = resolvFile
}
- err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...))
+
+ err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions)
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
@@ -1095,7 +1118,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
} else {
moreCreateArgs = nil
}
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
case IsolationChroot:
err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr)
case IsolationOCIRootless:
@@ -1109,13 +1132,22 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
}
options.Args = append(options.Args, rootlessFlag...)
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
+ err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path))
default:
err = errors.Errorf("don't know how to run this command")
}
return err
}
+func contains(volumes []string, v string) bool {
+ for _, i := range volumes {
+ if i == v {
+ return true
+ }
+ }
+ return false
+}
+
func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error {
switch isolation {
case IsolationOCIRootless:
@@ -1123,10 +1155,22 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
logrus.Debugf("Forcing use of an IPC namespace.")
}
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)})
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host {
- logrus.Debugf("Disabling network namespace.")
+ _, err := exec.LookPath("slirp4netns")
+ hostNetworking := err != nil
+ networkNamespacePath := ""
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ hostNetworking = ns.Host
+ networkNamespacePath = ns.Path
+ if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
+ logrus.Debugf("Disabling network namespace configuration.")
+ networkNamespacePath = ""
+ }
}
- options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+ options.NamespaceOptions.AddOrReplace(NamespaceOption{
+ Name: string(specs.NetworkNamespace),
+ Host: hostNetworking,
+ Path: networkNamespacePath,
+ })
if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
logrus.Debugf("Forcing use of a PID namespace.")
}
@@ -1227,9 +1271,10 @@ type runUsingRuntimeSubprocOptions struct {
ConfigureNetworks []string
MoreCreateArgs []string
ContainerName string
+ Isolation Isolation
}
-func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,
@@ -1240,6 +1285,7 @@ func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bo
ConfigureNetworks: configureNetworks,
MoreCreateArgs: moreCreateArgs,
ContainerName: containerName,
+ Isolation: isolation,
})
if conferr != nil {
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
@@ -1318,7 +1364,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
// Run the container, start to finish.
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
+ status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
@@ -1333,7 +1379,7 @@ func runUsingRuntimeMain() {
os.Exit(1)
}
-func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
+func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
@@ -1490,7 +1536,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetwork
}()
if configureNetwork {
- teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args)
+ teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, spec.Process.Args)
if teardown != nil {
defer teardown()
}
@@ -1623,9 +1669,81 @@ func runCollectOutput(fds, closeBeforeReadingFds []int) string {
}
return b.String()
}
+func setupRootlessNetwork(pid int) (teardown func(), err error) {
+ slirp4netns, err := exec.LookPath("slirp4netns")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot find slirp4netns")
+ }
+
+ rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
+ }
+ defer rootlessSlirpSyncR.Close()
+
+ // Be sure there are no fds inherited to slirp4netns except the sync pipe
+ files, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot list open fds")
+ }
+ for _, f := range files {
+ fd, err := strconv.Atoi(f.Name())
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse fd")
+ }
+ if fd == int(rootlessSlirpSyncW.Fd()) {
+ continue
+ }
+ unix.CloseOnExec(fd)
+ }
+
+ cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
+ cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
+
+ err = cmd.Start()
+ rootlessSlirpSyncW.Close()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot start slirp4netns")
+ }
-func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
+ b := make([]byte, 1)
+ for {
+ if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
+ return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout")
+ }
+ if _, err := rootlessSlirpSyncR.Read(b); err == nil {
+ break
+ } else {
+ if os.IsTimeout(err) {
+ // Check if the process is still running.
+ var status syscall.WaitStatus
+ _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read slirp4netns process status")
+ }
+ if status.Exited() || status.Signaled() {
+ return nil, errors.New("slirp4netns failed")
+ }
+
+ continue
+ }
+ return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe")
+ }
+ }
+
+ return func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }, nil
+}
+
+func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
var netconf, undo []*libcni.NetworkConfigList
+
+ if isolation == IsolationOCIRootless {
+ return setupRootlessNetwork(pid)
+ }
// Scan for CNI configuration files.
confdir := options.CNIConfigDir
files, err := libcni.ConfFiles(confdir, []string{".conf"})
@@ -1956,7 +2074,7 @@ func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Bo
for i := range scm {
fds, err := unix.ParseUnixRights(&scm[i])
if err != nil {
- return -1, errors.Wrapf(err, "error parsing unix rights control message: %v")
+ return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i])
}
logrus.Debugf("fds: %v", fds)
if len(fds) == 0 {
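
A minimal sketch, not part of the patch, of the decision checkAndOverrideIsolationOptions now makes for rootless isolation: probe PATH for slirp4netns with exec.LookPath and fall back to host networking only when the binary is absent, instead of always forcing the host network namespace.

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	_, err := exec.LookPath("slirp4netns")
	hostNetworking := err != nil
	if hostNetworking {
		fmt.Println("slirp4netns not found; falling back to host networking")
	} else {
		fmt.Println("slirp4netns available; a rootless network namespace can be configured")
	}
}
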
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index fbe623660..2a970b8d6 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -55,6 +55,10 @@ func (c *Cmd) Start() error {
}
c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
+ // Please the libpod "rootless" package to find the expected env variables.
+ c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+
// Create the pipe for reading the child's PID.
pidRead, pidWrite, err := os.Pipe()
if err != nil {
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 3a415a7f3..b2451b78b 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -7,10 +7,8 @@ import (
"net/url"
"os"
"path"
- "path/filepath"
"strconv"
"strings"
- "syscall"
"github.com/containers/image/directory"
dockerarchive "github.com/containers/image/docker/archive"
@@ -31,6 +29,10 @@ import (
const (
minimumTruncatedIDLength = 3
+ // DefaultTransport is a prefix that we apply to an image name if we
+ // can't find one in the local Store, in order to generate a source
+ // reference for the image that we can then copy to the local Store.
+ DefaultTransport = "docker://"
)
var (
@@ -89,6 +91,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
}
+ name = strings.TrimPrefix(name, DefaultTransport)
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
@@ -450,60 +453,6 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap,
return uid, gid, nil
}
-// UnsharedRootPath returns a location under ($XDG_DATA_HOME/containers/storage,
-// or $HOME/.local/share/containers/storage, or
-// (the user's home directory)/.local/share/containers/storage, or an error.
-func UnsharedRootPath(homedir string) (string, error) {
- // If $XDG_DATA_HOME is defined...
- if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome {
- return filepath.Join(envDataHome, "containers", "storage"), nil
- }
- // If $XDG_DATA_HOME is not defined, but $HOME is defined...
- if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir {
- // Default to the user's $HOME/.local/share/containers/storage subdirectory.
- return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil
- }
- // If we know where our home directory is...
- if homedir != "" {
- // Default to the user's homedir/.local/share/containers/storage subdirectory.
- return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil
- }
- return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set")
-}
-
-// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error.
-func UnsharedRunrootPath(uid string) (string, error) {
- // If $XDG_RUNTIME_DIR is defined...
- if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir {
- return filepath.Join(envRuntimeDir, "run"), nil
- }
- var runtimeDir string
- // If $XDG_RUNTIME_DIR is not defined, but we know our UID...
- if uid != "" {
- tmpDir := filepath.Join("/var/run/user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
- return "", errors.New("could not set XDG_RUNTIME_DIR")
- }
- return runtimeDir, nil
-}
-
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index d79412afc..c4410d0af 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -4,8 +4,8 @@ github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containers/image 5e5b67d6b1cf43cc349128ec3ed7d5283a6cc0d1
-github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75
-github.com/containers/storage 41294c85d97bef688e18f710402895dbecde3308
+github.com/containers/libpod e75469ab99c48e9fbe2b36ade229d384bdea9144
+github.com/containers/storage 09abf3a26b8a3aa69e29fd7faeb260b98d675759
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
@@ -36,7 +36,7 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux master
github.com/openshift/imagebuilder master
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/pborman/uuid master
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 000000000..c86bcc066
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+	for token, err := l.Next(); err == nil; token, err = l.Next() {
+ // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+	for token, err := t.Next(); err == nil; token, err = t.Next() {
+ // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexical tokens
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
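For reference, a short usage sketch of the vendored package. The program below is illustrative only (it is not part of this change) and assumes the import path github.com/google/shlex shown in the file header; it demonstrates that quoted arguments stay whole and unquoted comments are dropped, per the lexer rules above.

package main

import (
	"fmt"
	"log"

	"github.com/google/shlex"
)

func main() {
	// The quoted argument survives as a single token; the trailing unquoted
	// comment is lexed as a CommentToken and skipped by Split.
	args, err := shlex.Split(`run --name "my ctr" alpine echo hi # trailing comment`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", args)
	// Prints: ["run" "--name" "my ctr" "alpine" "echo" "hi"]
}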