aboutsummaryrefslogtreecommitdiff
path: root/vendor/github.com/openshift/imagebuilder
diff options
context:
space:
mode:
authorbaude <bbaude@redhat.com>2018-04-25 13:26:52 -0500
committerAtomic Bot <atomic-devel@projectatomic.io>2018-04-27 20:51:07 +0000
commita824186ac9803ef5f7548df790988a4ebd2d9c07 (patch)
tree63c64e9be4d9c44bd160dd974b740231497eabcd /vendor/github.com/openshift/imagebuilder
parent4e468ce83d69e9748e80eb98a6f5bd3c5114cc7d (diff)
downloadpodman-a824186ac9803ef5f7548df790988a4ebd2d9c07.tar.gz
podman-a824186ac9803ef5f7548df790988a4ebd2d9c07.tar.bz2
podman-a824186ac9803ef5f7548df790988a4ebd2d9c07.zip
Use buildah commit and bud in podman
Vendor in buildah and use as much of commit and bud as possible for podman build and commit. Resolves #586 Signed-off-by: baude <bbaude@redhat.com> Closes: #681 Approved by: mheon
Diffstat (limited to 'vendor/github.com/openshift/imagebuilder')
-rw-r--r--vendor/github.com/openshift/imagebuilder/LICENSE192
-rw-r--r--vendor/github.com/openshift/imagebuilder/README.md104
-rw-r--r--vendor/github.com/openshift/imagebuilder/builder.go510
-rw-r--r--vendor/github.com/openshift/imagebuilder/constants.go13
-rw-r--r--vendor/github.com/openshift/imagebuilder/dispatchers.go570
-rw-r--r--vendor/github.com/openshift/imagebuilder/doc.go6
-rw-r--r--vendor/github.com/openshift/imagebuilder/evaluator.go160
-rw-r--r--vendor/github.com/openshift/imagebuilder/internals.go83
-rw-r--r--vendor/github.com/openshift/imagebuilder/shell_parser.go314
-rw-r--r--vendor/github.com/openshift/imagebuilder/signal/README.md1
-rw-r--r--vendor/github.com/openshift/imagebuilder/signal/signal.go25
-rw-r--r--vendor/github.com/openshift/imagebuilder/signal/signals.go79
-rw-r--r--vendor/github.com/openshift/imagebuilder/strslice/strslice.go30
13 files changed, 2087 insertions, 0 deletions
diff --git a/vendor/github.com/openshift/imagebuilder/LICENSE b/vendor/github.com/openshift/imagebuilder/LICENSE
new file mode 100644
index 000000000..ea21aad9d
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+ Copyright 2016 The OpenShift Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift/imagebuilder/README.md b/vendor/github.com/openshift/imagebuilder/README.md
new file mode 100644
index 000000000..2f9c110dd
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/README.md
@@ -0,0 +1,104 @@
+Docker / OCI Image Builder
+==========================
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/openshift/imagebuilder)](https://goreportcard.com/report/github.com/openshift/imagebuilder)
+[![GoDoc](https://godoc.org/github.com/openshift/imagebuilder?status.png)](https://godoc.org/github.com/openshift/imagebuilder)
+[![Travis](https://travis-ci.org/openshift/imagebuilder.svg?branch=master)](https://travis-ci.org/openshift/imagebuilder)
+[![Join the chat at freenode:openshift-dev](https://img.shields.io/badge/irc-freenode%3A%20%23openshift--dev-blue.svg)](http://webchat.freenode.net/?channels=%23openshift-dev)
+
+Note: this library is beta and may contain bugs that prevent images from being identical to Docker build. Test your images (and add to our conformance suite)!
+
+This library supports using the Dockerfile syntax to build Docker
+compatible images, without invoking Docker build. It is intended to give
+clients more control over how a Docker build is run, including:
+
+* Instead of building one layer per line, run all instructions in the
+ same container
+* Set Docker HostConfig settings like network and memory controls that
+ are not available when running Docker builds
+* Mount external files into the build that are not persisted as part of
+ the final image (i.e. "secrets")
+* If there are no RUN commands in the Dockerfile, the container is created
+ and committed, but never started.
+
+The final image should be 99.9% compatible with regular docker builds,
+but bugs are always possible.
+
+Future goals include:
+
+* Output OCI compatible images
+* Support other container execution engines, like runc or rkt
+* Better conformance testing
+* Windows support
+
+## Install and Run
+
+To download and install the library and the binary, set up a Golang build environment and with `GOPATH` set run:
+
+```
+$ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+```
+
+The included command line takes one argument, a path to a directory containing a Dockerfile. The `-t` option
+can be used to specify an image to tag as:
+
+```
+$ imagebuilder [-t TAG] DIRECTORY
+```
+
+To mount a file into the image for build that will not be present in the final output image, run:
+
+```
+$ imagebuilder --mount ~/secrets/private.key:/etc/keys/private.key path/to/my/code testimage
+```
+
+Any processes in the Dockerfile will have access to `/etc/keys/private.key`, but that file will not be part of the committed image.
+
+Running `--mount` requires Docker 1.10 or newer, as it uses a Docker volume to hold the mounted files and the volume API was not
+available in earlier versions.
+
+You can also customize which Dockerfile is run, or run multiple Dockerfiles in sequence (the FROM is ignored on
+later files):
+
+```
+$ imagebuilder -f Dockerfile:Dockerfile.extra .
+```
+
+will build the current directory and combine the first Dockerfile with the second. The FROM in the second image
+is ignored.
+
+
+## Code Example
+
+```
+f, err := os.Open("path/to/Dockerfile")
+if err != nil {
+ return err
+}
+defer f.Close()
+
+e := builder.NewClientExecutor(o.Client)
+e.Out, e.ErrOut = os.Stdout, os.Stderr
+e.AllowPull = true
+e.Directory = "context/directory"
+e.Tag = "name/of-image:and-tag"
+e.AuthFn = nil // ... pass a function to retrieve authorization info
+e.LogFn = func(format string, args ...interface{}) {
+ fmt.Fprintf(e.ErrOut, "--> %s\n", fmt.Sprintf(format, args...))
+}
+
+buildErr := e.Build(f, map[string]string{"arg1":"value1"})
+if err := e.Cleanup(); err != nil {
+ fmt.Fprintf(e.ErrOut, "error: Unable to clean up build: %v\n", err)
+}
+
+return buildErr
+```
+
+Example of usage from OpenShift's experimental `dockerbuild` [command with mount secrets](https://github.com/openshift/origin/blob/26c9e032ff42f613fe10649cd7c5fa1b4c33501b/pkg/cmd/cli/cmd/dockerbuild/dockerbuild.go)
+
+## Run conformance tests (very slow):
+
+```
+go test ./dockerclient/conformance_test.go -tags conformance
+```
diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go
new file mode 100644
index 000000000..6d6e4c38a
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/builder.go
@@ -0,0 +1,510 @@
+package imagebuilder
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/builder/dockerfile/parser"
+)
+
+// Copy defines a copy operation required on the container.
+type Copy struct {
+ // If true, this is a copy from the file system to the container. If false,
+ // the copy is from the context.
+ FromFS bool
+ // If set, this is a copy from the named stage or image to the container.
+ From string
+ Src []string
+ Dest string
+ Download bool
+}
+
+// Run defines a run operation required in the container.
+type Run struct {
+ Shell bool
+ Args []string
+}
+
+type Executor interface {
+ Preserve(path string) error
+ Copy(excludes []string, copies ...Copy) error
+ Run(run Run, config docker.Config) error
+ UnrecognizedInstruction(step *Step) error
+}
+
+type logExecutor struct{}
+
+func (logExecutor) Preserve(path string) error {
+ log.Printf("PRESERVE %s", path)
+ return nil
+}
+
+func (logExecutor) Copy(excludes []string, copies ...Copy) error {
+ for _, c := range copies {
+ log.Printf("COPY %v -> %s (from:%s download:%t)", c.Src, c.Dest, c.From, c.Download)
+ }
+ return nil
+}
+
+func (logExecutor) Run(run Run, config docker.Config) error {
+ log.Printf("RUN %v %t (%v)", run.Args, run.Shell, config.Env)
+ return nil
+}
+
+func (logExecutor) UnrecognizedInstruction(step *Step) error {
+ log.Printf("Unknown instruction: %s", strings.ToUpper(step.Command))
+ return nil
+}
+
+type noopExecutor struct{}
+
+func (noopExecutor) Preserve(path string) error {
+ return nil
+}
+
+func (noopExecutor) Copy(excludes []string, copies ...Copy) error {
+ return nil
+}
+
+func (noopExecutor) Run(run Run, config docker.Config) error {
+ return nil
+}
+
+func (noopExecutor) UnrecognizedInstruction(step *Step) error {
+ return nil
+}
+
+type VolumeSet []string
+
+func (s *VolumeSet) Add(path string) bool {
+ if path == "/" {
+ set := len(*s) != 1 || (*s)[0] != ""
+ *s = []string{""}
+ return set
+ }
+ path = strings.TrimSuffix(path, "/")
+ var adjusted []string
+ for _, p := range *s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return false
+ }
+ if strings.HasPrefix(p, path+"/") {
+ continue
+ }
+ adjusted = append(adjusted, p)
+ }
+ adjusted = append(adjusted, path)
+ *s = adjusted
+ return true
+}
+
+func (s VolumeSet) Has(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path {
+ return true
+ }
+ }
+ return false
+}
+
+func (s VolumeSet) Covers(path string) bool {
+ if path == "/" {
+ return len(s) == 1 && s[0] == ""
+ }
+ path = strings.TrimSuffix(path, "/")
+ for _, p := range s {
+ if p == path || strings.HasPrefix(path, p+"/") {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ LogExecutor = logExecutor{}
+ NoopExecutor = noopExecutor{}
+)
+
+type Stages []Stage
+
+func (stages Stages) ByName(name string) (Stage, bool) {
+ for _, stage := range stages {
+ if stage.Name == name {
+ return stage, true
+ }
+ }
+ return Stage{}, false
+}
+
+func (stages Stages) ByTarget(target string) (Stages, bool) {
+ if len(target) == 0 {
+ return stages, true
+ }
+ for i, stage := range stages {
+ if stage.Name == target {
+ return stages[i : i+1], true
+ }
+ }
+ return nil, false
+}
+
+type Stage struct {
+ Position int
+ Name string
+ Builder *Builder
+ Node *parser.Node
+}
+
+func NewStages(node *parser.Node, b *Builder) Stages {
+ var stages Stages
+ for i, root := range SplitBy(node, command.From) {
+ name, _ := extractNameFromNode(root.Children[0])
+ if len(name) == 0 {
+ name = strconv.Itoa(i)
+ }
+ stages = append(stages, Stage{
+ Position: i,
+ Name: name,
+ Builder: &Builder{
+ Args: b.Args,
+ AllowedArgs: b.AllowedArgs,
+ },
+ Node: root,
+ })
+ }
+ return stages
+}
+
+func extractNameFromNode(node *parser.Node) (string, bool) {
+ if node.Value != command.From {
+ return "", false
+ }
+ n := node.Next
+ if n == nil || n.Next == nil {
+ return "", false
+ }
+ n = n.Next
+ if !strings.EqualFold(n.Value, "as") || n.Next == nil || len(n.Next.Value) == 0 {
+ return "", false
+ }
+ return n.Next.Value, true
+}
+
+type Builder struct {
+ RunConfig docker.Config
+
+ Env []string
+ Args map[string]string
+ CmdSet bool
+ Author string
+
+ AllowedArgs map[string]bool
+ Volumes VolumeSet
+ Excludes []string
+
+ PendingVolumes VolumeSet
+ PendingRuns []Run
+ PendingCopies []Copy
+
+ Warnings []string
+}
+
+func NewBuilder(args map[string]string) *Builder {
+ allowed := make(map[string]bool)
+ for k, v := range builtinAllowedBuildArgs {
+ allowed[k] = v
+ }
+ return &Builder{
+ Args: args,
+ AllowedArgs: allowed,
+ }
+}
+
+func ParseFile(path string) (*parser.Node, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseDockerfile(f)
+}
+
+// Step creates a new step from the current state.
+func (b *Builder) Step() *Step {
+ dst := make([]string, len(b.Env)+len(b.RunConfig.Env))
+ copy(dst, b.Env)
+ dst = append(dst, b.RunConfig.Env...)
+ dst = append(dst, b.Arguments()...)
+ return &Step{Env: dst}
+}
+
+// Run executes a step, transforming the current builder and
+// invoking any Copy or Run operations. noRunsRemaining is an
+// optimization hint that allows the builder to avoid performing
+// unnecessary work.
+func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error {
+ fn, ok := evaluateTable[step.Command]
+ if !ok {
+ return exec.UnrecognizedInstruction(step)
+ }
+ if err := fn(b, step.Args, step.Attrs, step.Flags, step.Original); err != nil {
+ return err
+ }
+
+ copies := b.PendingCopies
+ b.PendingCopies = nil
+ runs := b.PendingRuns
+ b.PendingRuns = nil
+
+ // Once a VOLUME is defined, future ADD/COPY instructions are
+ // all that may mutate that path. Instruct the executor to preserve
+ // the path. The executor must handle invalidating preserved info.
+ for _, path := range b.PendingVolumes {
+ if b.Volumes.Add(path) && !noRunsRemaining {
+ if err := exec.Preserve(path); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := exec.Copy(b.Excludes, copies...); err != nil {
+ return err
+ }
+ for _, run := range runs {
+ config := b.Config()
+ config.Env = step.Env
+ if err := exec.Run(run, *config); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// RequiresStart returns true if a running container environment is necessary
+// to invoke the provided commands
+func (b *Builder) RequiresStart(node *parser.Node) bool {
+ for _, child := range node.Children {
+ if child.Value == command.Run {
+ return true
+ }
+ }
+ return false
+}
+
+// Config returns a snapshot of the current RunConfig intended for
+// use with a container commit.
+func (b *Builder) Config() *docker.Config {
+ config := b.RunConfig
+ if config.OnBuild == nil {
+ config.OnBuild = []string{}
+ }
+ if config.Entrypoint == nil {
+ config.Entrypoint = []string{}
+ }
+ config.Image = ""
+ return &config
+}
+
+// Arguments returns the currently active arguments.
+func (b *Builder) Arguments() []string {
+ var envs []string
+ for key, val := range b.Args {
+ if _, ok := b.AllowedArgs[key]; ok {
+ envs = append(envs, fmt.Sprintf("%s=%s", key, val))
+ }
+ }
+ return envs
+}
+
+// ErrNoFROM is returned if the Dockerfile did not contain a FROM
+// statement.
+var ErrNoFROM = fmt.Errorf("no FROM statement found")
+
+// From returns the image this dockerfile depends on, or an error
+// if no FROM is found or if multiple FROM are specified. If a
+// single from is found the passed node is updated with only
+// the remaining statements. The builder's RunConfig.Image field
+// is set to the first From found, or left unchanged if already
+// set.
+func (b *Builder) From(node *parser.Node) (string, error) {
+ children := SplitChildren(node, command.From)
+ switch {
+ case len(children) == 0:
+ return "", ErrNoFROM
+ case len(children) > 1:
+ return "", fmt.Errorf("multiple FROM statements are not supported")
+ default:
+ step := b.Step()
+ if err := step.Resolve(children[0]); err != nil {
+ return "", err
+ }
+ if err := b.Run(step, NoopExecutor, false); err != nil {
+ return "", err
+ }
+ return b.RunConfig.Image, nil
+ }
+}
+
+// FromImage updates the builder to use the provided image (resetting RunConfig
+// and recording the image environment), and updates the node with any ONBUILD
+// statements extracted from the parent image.
+func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
+ SplitChildren(node, command.From)
+
+ b.RunConfig = *image.Config
+ b.Env = b.RunConfig.Env
+ b.RunConfig.Env = nil
+
+ // Check to see if we have a default PATH, note that windows won't
+ // have one as its set by HCS
+ if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") {
+ b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv)
+ }
+
+ // Join the image onbuild statements into node
+ if image.Config == nil || len(image.Config.OnBuild) == 0 {
+ return nil
+ }
+ extra, err := ParseDockerfile(bytes.NewBufferString(strings.Join(image.Config.OnBuild, "\n")))
+ if err != nil {
+ return err
+ }
+ for _, child := range extra.Children {
+ switch strings.ToUpper(child.Value) {
+ case "ONBUILD":
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+ case "MAINTAINER", "FROM":
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", child.Value)
+ }
+ }
+ node.Children = append(extra.Children, node.Children...)
+ // Since we've processed the OnBuild statements, clear them from the runconfig state.
+ b.RunConfig.OnBuild = nil
+ return nil
+}
+
+// SplitChildren removes any children with the provided value from node
+// and returns them as an array. node.Children is updated.
+func SplitChildren(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var children []*parser.Node
+ for _, child := range node.Children {
+ if child.Value == value {
+ split = append(split, child)
+ } else {
+ children = append(children, child)
+ }
+ }
+ node.Children = children
+ return split
+}
+
+func SplitBy(node *parser.Node, value string) []*parser.Node {
+ var split []*parser.Node
+ var current *parser.Node
+ for _, child := range node.Children {
+ if current == nil || child.Value == value {
+ copied := *node
+ current = &copied
+ current.Children = nil
+ current.Next = nil
+ split = append(split, current)
+ }
+ current.Children = append(current.Children, child)
+ }
+ return split
+}
+
+// StepFunc is invoked with the result of a resolved step.
+type StepFunc func(*Builder, []string, map[string]bool, []string, string) error
+
+var evaluateTable = map[string]StepFunc{
+ command.Env: env,
+ command.Label: label,
+ command.Maintainer: maintainer,
+ command.Add: add,
+ command.Copy: dispatchCopy, // copy() is a go builtin
+ command.From: from,
+ command.Onbuild: onbuild,
+ command.Workdir: workdir,
+ command.Run: run,
+ command.Cmd: cmd,
+ command.Entrypoint: entrypoint,
+ command.Expose: expose,
+ command.Volume: volume,
+ command.User: user,
+ command.StopSignal: stopSignal,
+ command.Arg: arg,
+ command.Healthcheck: healthcheck,
+ command.Shell: shell,
+}
+
+// builtinAllowedBuildArgs is list of built-in allowed build args
+var builtinAllowedBuildArgs = map[string]bool{
+ "HTTP_PROXY": true,
+ "http_proxy": true,
+ "HTTPS_PROXY": true,
+ "https_proxy": true,
+ "FTP_PROXY": true,
+ "ftp_proxy": true,
+ "NO_PROXY": true,
+ "no_proxy": true,
+}
+
+// ParseDockerIgnore returns a list of the excludes in the .dockerignore file.
+// extracted from fsouza/go-dockerclient.
+func ParseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ return strings.Split(string(ignore), "\n"), nil
+}
+
+// ExportEnv creates an export statement for a shell that contains all of the
+// provided environment.
+func ExportEnv(env []string) string {
+ if len(env) == 0 {
+ return ""
+ }
+ out := "export"
+ for _, e := range env {
+ if len(e) == 0 {
+ continue
+ }
+ out += " " + BashQuote(e)
+ }
+ return out + "; "
+}
+
+// BashQuote escapes the provided string and surrounds it with double quotes.
+// TODO: verify that these are all we have to escape.
+func BashQuote(env string) string {
+ out := []rune{'"'}
+ for _, r := range env {
+ switch r {
+ case '$', '\\', '"':
+ out = append(out, '\\', r)
+ default:
+ out = append(out, r)
+ }
+ }
+ out = append(out, '"')
+ return string(out)
+}
diff --git a/vendor/github.com/openshift/imagebuilder/constants.go b/vendor/github.com/openshift/imagebuilder/constants.go
new file mode 100644
index 000000000..86cd2e5e2
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/constants.go
@@ -0,0 +1,13 @@
+package imagebuilder
+
+const (
+ // in docker/system
+ NoBaseImageSpecifier = "scratch"
+
+ // not yet part of our import
+ commandArg = "arg"
+ commandStopSignal = "stopsignal"
+
+ // in docker/system
+ defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+)
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
new file mode 100644
index 000000000..afa04bb89
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -0,0 +1,570 @@
+package imagebuilder
+
+// This file contains the dispatchers for each command. Note that
+// `nullDispatch` is not actually a command, but support for commands we parse
+// but do nothing with.
+//
+// See evaluator.go for a higher level discussion of the whole evaluator
+// package.
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ docker "github.com/fsouza/go-dockerclient"
+
+ "github.com/openshift/imagebuilder/signal"
+ "github.com/openshift/imagebuilder/strslice"
+)
+
var (
	// obRgex strips a leading, case-insensitive "ONBUILD" keyword
	// (with surrounding whitespace) from an instruction line.
	obRgex = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
)
+
// nullDispatch handles commands that are parsed but intentionally
// ignored; it succeeds without touching the builder. This is
// effectively not a command.
func nullDispatch(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	return nil
}
+
+// ENV foo bar
+//
+// Sets the environment variable foo to bar, also makes interpolation
+// in the dockerfile available from the next statement on via ${foo}.
+//
+func env(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("ENV")
+ }
+
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("ENV")
+ }
+
+ // TODO/FIXME/NOT USED
+ // Just here to show how to use the builder flags stuff within the
+ // context of a builder command. Will remove once we actually add
+ // a builder command to something!
+ /*
+ flBool1 := b.flags.AddBool("bool1", false)
+ flStr1 := b.flags.AddString("str1", "HI")
+
+ if err := b.flags.Parse(); err != nil {
+ return err
+ }
+
+ fmt.Printf("Bool1:%v\n", flBool1)
+ fmt.Printf("Str1:%v\n", flStr1)
+ */
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+ newVar := args[j] + "=" + args[j+1] + ""
+ gotOne := false
+ for i, envVar := range b.RunConfig.Env {
+ envParts := strings.SplitN(envVar, "=", 2)
+ if envParts[0] == args[j] {
+ b.RunConfig.Env[i] = newVar
+ b.Env = append([]string{newVar}, b.Env...)
+ gotOne = true
+ break
+ }
+ }
+ if !gotOne {
+ b.RunConfig.Env = append(b.RunConfig.Env, newVar)
+ b.Env = append([]string{newVar}, b.Env...)
+ }
+ j++
+ }
+
+ return nil
+}
+
+// MAINTAINER some text <maybe@an.email.address>
+//
+// Sets the maintainer metadata.
+func maintainer(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("MAINTAINER")
+ }
+ b.Author = args[0]
+ return nil
+}
+
+// LABEL some json data describing the image
+//
+// Sets the Label variable foo to bar,
+//
+func label(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("LABEL")
+ }
+ if len(args)%2 != 0 {
+ // should never get here, but just in case
+ return errTooManyArguments("LABEL")
+ }
+
+ if b.RunConfig.Labels == nil {
+ b.RunConfig.Labels = map[string]string{}
+ }
+
+ for j := 0; j < len(args); j++ {
+ // name ==> args[j]
+ // value ==> args[j+1]
+ b.RunConfig.Labels[args[j]] = args[j+1]
+ j++
+ }
+ return nil
+}
+
+// ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
+// exist here. If you do not wish to have this automatic handling, use COPY.
+//
+func add(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+ return errAtLeastOneArgument("ADD")
+ }
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0:last], Dest: dest, Download: true})
+ return nil
+}
+
+// COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
+//
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) < 2 {
+ return errAtLeastOneArgument("COPY")
+ }
+ last := len(args) - 1
+ dest := makeAbsolute(args[last], b.RunConfig.WorkingDir)
+ var from string
+ if len(flagArgs) > 0 {
+ for _, arg := range flagArgs {
+ switch {
+ case strings.HasPrefix(arg, "--from="):
+ from = strings.TrimPrefix(arg, "--from=")
+ default:
+ return fmt.Errorf("COPY only supports the --from=<image|stage> flag")
+ }
+ }
+ }
+ b.PendingCopies = append(b.PendingCopies, Copy{From: from, Src: args[0:last], Dest: dest, Download: false})
+ return nil
+}
+
// FROM imagename
//
// This sets the image the dockerfile will build on top of.
//
func from(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	switch {
	case len(args) == 1:
	case len(args) == 3 && len(args[0]) > 0 && strings.EqualFold(args[1], "as") && len(args[2]) > 0:
		// Multi-stage form: FROM <source> AS <name>. NOTE(review): the
		// stage name args[2] is validated but not recorded here —
		// confirm it is handled by the caller.
	default:
		return fmt.Errorf("FROM requires either one argument, or three: FROM <source> [as <name>]")
	}

	name := args[0]
	// Windows cannot support a container with no base image.
	if name == NoBaseImageSpecifier {
		if runtime.GOOS == "windows" {
			return fmt.Errorf("Windows does not support FROM scratch")
		}
	}
	b.RunConfig.Image = name
	// TODO: handle onbuild
	return nil
}
+
// ONBUILD RUN echo yo
//
// ONBUILD triggers run when the image is used in a FROM statement.
//
// ONBUILD handling has a lot of special-case functionality, the heading in
// evaluator.go and comments around dispatch() in the same file explain the
// special cases. search for 'OnBuild' in internals.go for additional special
// cases.
//
func onbuild(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	if len(args) == 0 {
		return errAtLeastOneArgument("ONBUILD")
	}

	// Some instructions may not be deferred via ONBUILD.
	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
	switch triggerInstruction {
	case "ONBUILD":
		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
	case "MAINTAINER", "FROM":
		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
	}

	// Strip the leading ONBUILD keyword, keeping only the trigger
	// instruction to replay later.
	original = obRgex.ReplaceAllString(original, "")

	b.RunConfig.OnBuild = append(b.RunConfig.OnBuild, original)
	return nil
}
+
+// WORKDIR /tmp
+//
+// Set the working directory for future RUN/CMD/etc statements.
+//
+func workdir(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("WORKDIR")
+ }
+
+ // This is from the Dockerfile and will not necessarily be in platform
+ // specific semantics, hence ensure it is converted.
+ workdir := filepath.FromSlash(args[0])
+
+ if !filepath.IsAbs(workdir) {
+ current := filepath.FromSlash(b.RunConfig.WorkingDir)
+ workdir = filepath.Join(string(os.PathSeparator), current, workdir)
+ }
+
+ b.RunConfig.WorkingDir = workdir
+ return nil
+}
+
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
// only one argument. The difference in processing:
//
// RUN echo hi # sh -c echo hi (Linux)
// RUN echo hi # cmd /S /C echo hi (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	if b.RunConfig.Image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}

	// Shell form arrives as separate words; collapse to one string.
	args = handleJSONArgs(args, attributes)

	run := Run{Args: args}

	// Non-JSON (shell) form is executed through the platform shell.
	if !attributes["json"] {
		run.Shell = true
	}
	// Execution is deferred: the run is queued for the caller to perform.
	b.PendingRuns = append(b.PendingRuns, run)
	return nil
}
+
// CMD foo
//
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func cmd(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	cmdSlice := handleJSONArgs(args, attributes)

	// Shell form gets the platform shell prepended; JSON (exec) form
	// is stored verbatim.
	if !attributes["json"] {
		if runtime.GOOS != "windows" {
			cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
		} else {
			cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
		}
	}

	b.RunConfig.Cmd = strslice.StrSlice(cmdSlice)
	// Record that CMD was set explicitly so a later ENTRYPOINT does not
	// clear it (see entrypoint).
	if len(args) != 0 {
		b.CmdSet = true
	}
	return nil
}
+
// ENTRYPOINT /usr/sbin/nginx
//
// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
//
// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
// is initialized at NewBuilder time instead of through argument parsing.
//
func entrypoint(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	parsed := handleJSONArgs(args, attributes)

	switch {
	case attributes["json"]:
		// ENTRYPOINT ["echo", "hi"] — exec form, stored verbatim.
		b.RunConfig.Entrypoint = strslice.StrSlice(parsed)
	case len(parsed) == 0:
		// ENTRYPOINT [] — clears the entrypoint.
		b.RunConfig.Entrypoint = nil
	default:
		// ENTRYPOINT echo hi — shell form, wrapped in the platform shell.
		if runtime.GOOS != "windows" {
			b.RunConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]}
		} else {
			b.RunConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]}
		}
	}

	// when setting the entrypoint if a CMD was not explicitly set then
	// set the command to nil
	if !b.CmdSet {
		b.RunConfig.Cmd = nil
	}
	return nil
}
+
// EXPOSE 6667/tcp 7000/tcp
//
// Expose ports for links and port mappings. This all ends up in
// b.RunConfig.ExposedPorts for runconfig.
//
func expose(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	if len(args) == 0 {
		return errAtLeastOneArgument("EXPOSE")
	}

	if b.RunConfig.ExposedPorts == nil {
		b.RunConfig.ExposedPorts = make(map[docker.Port]struct{})
	}

	// Index ports already exposed by port number so re-exposing the
	// same port is a no-op.
	existing := map[string]struct{}{}
	for k := range b.RunConfig.ExposedPorts {
		existing[k.Port()] = struct{}{}
	}

	for _, port := range args {
		dp := docker.Port(port)
		if _, exists := existing[dp.Port()]; !exists {
			// Store in normalized "port/proto" form.
			b.RunConfig.ExposedPorts[docker.Port(fmt.Sprintf("%s/%s", dp.Port(), dp.Proto()))] = struct{}{}
		}
	}
	return nil
}
+
+// USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+func user(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("USER")
+ }
+
+ b.RunConfig.User = args[0]
+ return nil
+}
+
+// VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+//
+func volume(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) == 0 {
+ return errAtLeastOneArgument("VOLUME")
+ }
+
+ if b.RunConfig.Volumes == nil {
+ b.RunConfig.Volumes = map[string]struct{}{}
+ }
+ for _, v := range args {
+ v = strings.TrimSpace(v)
+ if v == "" {
+ return fmt.Errorf("Volume specified can not be an empty string")
+ }
+ b.RunConfig.Volumes[v] = struct{}{}
+ b.PendingVolumes.Add(v)
+ }
+ return nil
+}
+
+// STOPSIGNAL signal
+//
+// Set the signal that will be used to kill the container.
+func stopSignal(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return errExactlyOneArgument("STOPSIGNAL")
+ }
+
+ sig := args[0]
+ if err := signal.CheckSignal(sig); err != nil {
+ return err
+ }
+
+ b.RunConfig.StopSignal = sig
+ return nil
+}
+
// HEALTHCHECK foo
//
// Set the default healthcheck command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
// Supported forms: HEALTHCHECK NONE, and
// HEALTHCHECK [--interval=d] [--timeout=d] [--retries=n] CMD ...
func healthcheck(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
	if len(args) == 0 {
		return errAtLeastOneArgument("HEALTHCHECK")
	}
	// The first argument selects the healthcheck type (NONE or CMD).
	typ := strings.ToUpper(args[0])
	args = args[1:]
	if typ == "NONE" {
		if len(args) != 0 {
			return fmt.Errorf("HEALTHCHECK NONE takes no arguments")
		}
		// NONE is stored as a one-element test, disabling any inherited
		// healthcheck.
		test := strslice.StrSlice{typ}
		b.RunConfig.Healthcheck = &docker.HealthConfig{
			Test: test,
		}
	} else {
		// Warn when replacing a previously configured healthcheck.
		if b.RunConfig.Healthcheck != nil {
			oldCmd := b.RunConfig.Healthcheck.Test
			if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
				b.Warnings = append(b.Warnings, fmt.Sprintf("Note: overriding previous HEALTHCHECK: %v\n", oldCmd))
			}
		}

		healthcheck := docker.HealthConfig{}

		// Parse the instruction's own flags from flagArgs.
		flags := flag.NewFlagSet("", flag.ContinueOnError)
		flags.String("interval", "", "")
		flags.String("timeout", "", "")
		flRetries := flags.String("retries", "", "")

		if err := flags.Parse(flagArgs); err != nil {
			return err
		}

		switch typ {
		case "CMD":
			cmdSlice := handleJSONArgs(args, attributes)
			if len(cmdSlice) == 0 {
				return fmt.Errorf("Missing command after HEALTHCHECK CMD")
			}

			// Shell form is marked CMD-SHELL in the stored test.
			if !attributes["json"] {
				typ = "CMD-SHELL"
			}

			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
		default:
			return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
		}

		interval, err := parseOptInterval(flags.Lookup("interval"))
		if err != nil {
			return err
		}
		healthcheck.Interval = interval

		timeout, err := parseOptInterval(flags.Lookup("timeout"))
		if err != nil {
			return err
		}
		healthcheck.Timeout = timeout

		if *flRetries != "" {
			retries, err := strconv.ParseInt(*flRetries, 10, 32)
			if err != nil {
				return err
			}
			if retries < 1 {
				return fmt.Errorf("--retries must be at least 1 (not %d)", retries)
			}
			healthcheck.Retries = int(retries)
		} else {
			// Zero means "use the daemon default".
			healthcheck.Retries = 0
		}
		b.RunConfig.Healthcheck = &healthcheck
	}

	return nil
}
+
+// ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
+// Dockerfile author may optionally set a default value of this variable.
+func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ if len(args) != 1 {
+ return fmt.Errorf("ARG requires exactly one argument definition")
+ }
+
+ var (
+ name string
+ value string
+ hasDefault bool
+ )
+
+ arg := args[0]
+ // 'arg' can just be a name or name-value pair. Note that this is different
+ // from 'env' that handles the split of name and value at the parser level.
+ // The reason for doing it differently for 'arg' is that we support just
+ // defining an arg and not assign it a value (while 'env' always expects a
+ // name-value pair). If possible, it will be good to harmonize the two.
+ if strings.Contains(arg, "=") {
+ parts := strings.SplitN(arg, "=", 2)
+ name = parts[0]
+ value = parts[1]
+ hasDefault = true
+ } else {
+ name = arg
+ hasDefault = false
+ }
+ // add the arg to allowed list of build-time args from this step on.
+ b.AllowedArgs[name] = true
+
+ // If there is a default value associated with this arg then add it to the
+ // b.buildArgs if one is not already passed to the builder. The args passed
+ // to builder override the default value of 'arg'.
+ if _, ok := b.Args[name]; !ok && hasDefault {
+ b.Args[name] = value
+ }
+
+ return nil
+}
+
+// SHELL powershell -command
+//
+// Set the non-default shell to use.
+func shell(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
+ shellSlice := handleJSONArgs(args, attributes)
+ switch {
+ case len(shellSlice) == 0:
+ // SHELL []
+ return errAtLeastOneArgument("SHELL")
+ case attributes["json"]:
+ // SHELL ["powershell", "-command"]
+ b.RunConfig.Shell = strslice.StrSlice(shellSlice)
+ // b.RunConfig.Shell = strslice.StrSlice(shellSlice)
+ default:
+ // SHELL powershell -command - not JSON
+ return errNotJSON("SHELL")
+ }
+ return nil
+}
+
// errAtLeastOneArgument reports that command needs one or more arguments.
func errAtLeastOneArgument(command string) error {
	return fmt.Errorf("%s requires at least one argument", command)
}
+
// errExactlyOneArgument reports that command needs exactly one argument.
func errExactlyOneArgument(command string) error {
	return fmt.Errorf("%s requires exactly one argument", command)
}
+
// errTooManyArguments reports that command was given too many arguments.
func errTooManyArguments(command string) error {
	return fmt.Errorf("Bad input to %s, too many arguments", command)
}
+
// errNotJSON reports that command requires JSON (exec) form arguments.
func errNotJSON(command string) error {
	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
}
diff --git a/vendor/github.com/openshift/imagebuilder/doc.go b/vendor/github.com/openshift/imagebuilder/doc.go
new file mode 100644
index 000000000..97028ffc8
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/doc.go
@@ -0,0 +1,6 @@
// Package imagebuilder uses code from github.com/docker/docker/builder/* to implement
+// a Docker builder that does not create individual layers, but instead creates a
+// single layer.
+//
+// TODO: full windows support
+package imagebuilder
diff --git a/vendor/github.com/openshift/imagebuilder/evaluator.go b/vendor/github.com/openshift/imagebuilder/evaluator.go
new file mode 100644
index 000000000..83263127e
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/evaluator.go
@@ -0,0 +1,160 @@
+package imagebuilder
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/docker/docker/builder/dockerfile/command"
+ "github.com/docker/docker/builder/dockerfile/parser"
+)
+
+// ParseDockerfile parses the provided stream as a canonical Dockerfile
+func ParseDockerfile(r io.Reader) (*parser.Node, error) {
+ result, err := parser.Parse(r)
+ if err != nil {
+ return nil, err
+ }
+ return result.AST, nil
+}
+
// Environment variable interpolation will happen on these statements only.
// commandStopSignal and commandArg are local constants because those
// commands are not yet part of the imported command package.
var replaceEnvAllowed = map[string]bool{
	command.Env: true,
	command.Label: true,
	command.Add: true,
	command.Copy: true,
	command.Workdir: true,
	command.Expose: true,
	command.Volume: true,
	command.User: true,
	commandStopSignal: true,
	commandArg: true,
}
+
// Certain commands are allowed to have their args split into more
// words after env var replacements. Meaning:
// ENV foo="123 456"
// EXPOSE $foo
// should result in the same thing as:
// EXPOSE 123 456
// and not treat "123 456" as a single word.
// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
// Quotes will cause it to still be treated as single word.
// Currently only EXPOSE opts in to this behavior.
var allowWordExpansion = map[string]bool{
	command.Expose: true,
}
+
// Step represents the input Env and the output command after all
// post processing of the command arguments is done.
type Step struct {
	// Env is the environment used for variable interpolation.
	Env []string

	// Command is the lowercase Dockerfile instruction name (e.g. "run").
	Command string
	// Args holds the fully resolved instruction arguments.
	Args []string
	// Flags holds instruction flags such as --from=... on COPY.
	Flags []string
	// Attrs carries parser attributes (e.g. "json" for exec form).
	Attrs map[string]bool
	// Message is a human-readable rendering of the resolved step.
	Message string
	// Original is the unmodified instruction line from the Dockerfile.
	Original string
}
+
// Resolve transforms a parsed Dockerfile line into a command to execute,
// resolving any arguments.
//
// Almost all nodes will have this structure:
// Child[Node, Node, Node] where Child is from parser.Node.Children and each
// node comes from parser.Node.Next. This forms a "line" with a statement and
// arguments and we process them in this normalized form by hitting
// evaluateTable with the leaf nodes of the command and the Builder object.
//
// ONBUILD is a special case; in this case the parser will emit:
// Child[Node, Child[Node, Node...]] where the first node is the literal
// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
// deal with that, at least until it becomes more of a general concern with new
// features.
func (b *Step) Resolve(ast *parser.Node) error {
	cmd := ast.Value
	upperCasedCmd := strings.ToUpper(cmd)

	// To ensure the user is given a decent error message if the platform
	// on which the daemon is running does not support a builder command.
	if err := platformSupports(strings.ToLower(cmd)); err != nil {
		return err
	}

	attrs := ast.Attributes
	original := ast.Original
	flags := ast.Flags
	strList := []string{}
	msg := upperCasedCmd

	if len(ast.Flags) > 0 {
		msg += " " + strings.Join(ast.Flags, " ")
	}

	if cmd == "onbuild" {
		// ONBUILD wraps another instruction; descend into the wrapped
		// instruction and resolve its arguments instead.
		if ast.Next == nil {
			return fmt.Errorf("ONBUILD requires at least one argument")
		}
		ast = ast.Next.Children[0]
		strList = append(strList, ast.Value)
		msg += " " + ast.Value

		if len(ast.Flags) > 0 {
			msg += " " + strings.Join(ast.Flags, " ")
		}

	}

	// count the number of nodes that we are going to traverse first
	// so we can pre-create the argument and message array. This speeds up the
	// allocation of those list a lot when they have a lot of arguments
	cursor := ast
	var n int
	for cursor.Next != nil {
		cursor = cursor.Next
		n++
	}
	msgList := make([]string, n)

	var i int
	envs := b.Env
	for ast.Next != nil {
		ast = ast.Next
		var str string
		str = ast.Value
		if replaceEnvAllowed[cmd] {
			// Interpolate ${var}/$var references for commands that allow it.
			var err error
			var words []string

			if allowWordExpansion[cmd] {
				// Expansion may split one token into several words.
				words, err = ProcessWords(str, envs)
				if err != nil {
					return err
				}
				strList = append(strList, words...)
			} else {
				str, err = ProcessWord(str, envs)
				if err != nil {
					return err
				}
				strList = append(strList, str)
			}
		} else {
			strList = append(strList, str)
		}
		// The message always shows the pre-interpolation value.
		msgList[i] = ast.Value
		i++
	}

	msg += " " + strings.Join(msgList, " ")

	b.Message = msg
	b.Command = cmd
	b.Args = strList
	b.Original = original
	b.Attrs = attrs
	b.Flags = flags
	return nil
}
diff --git a/vendor/github.com/openshift/imagebuilder/internals.go b/vendor/github.com/openshift/imagebuilder/internals.go
new file mode 100644
index 000000000..9a8005bfc
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/internals.go
@@ -0,0 +1,83 @@
+package imagebuilder
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
// hasEnvName reports whether env contains an entry for the variable
// named name, i.e. an element beginning with "name=".
func hasEnvName(env []string, name string) bool {
	prefix := name + "="
	for _, entry := range env {
		if strings.HasPrefix(entry, prefix) {
			return true
		}
	}
	return false
}
+
// platformSupports is a short-term function to give users a quality error
// message if a Dockerfile uses a command not supported on the platform.
func platformSupports(command string) error {
	if runtime.GOOS != "windows" {
		// All commands are supported on non-Windows daemons.
		return nil
	}
	switch command {
	case "expose", "user", "stopsignal", "arg":
		return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
	default:
		return nil
	}
}
+
// handleJSONArgs normalizes instruction arguments: JSON (exec) form is
// returned as-is, while shell form is joined into a single string.
func handleJSONArgs(args []string, attributes map[string]bool) []string {
	switch {
	case len(args) == 0:
		return []string{}
	case attributes != nil && attributes["json"]:
		// Exec form: each element is a distinct argv entry.
		return args
	default:
		// Shell form: one command string for the shell to parse.
		return []string{strings.Join(args, " ")}
	}
}
+
// makeAbsolute anchors dest at workingDir when dest is relative,
// preserving any trailing separator (or separator+"." suffix) that
// filepath.Join would otherwise clean away.
func makeAbsolute(dest, workingDir string) string {
	if filepath.IsAbs(dest) {
		return dest
	}
	sep := string(os.PathSeparator)
	// Remember whether the destination named a directory explicitly.
	trailing := strings.HasSuffix(dest, sep) || strings.HasSuffix(dest, sep+".")
	joined := filepath.Join(sep, filepath.FromSlash(workingDir), dest)
	if trailing {
		joined += sep
	}
	return joined
}
+
+// parseOptInterval(flag) is the duration of flag.Value, or 0 if
+// empty. An error is reported if the value is given and is not positive.
+func parseOptInterval(f *flag.Flag) (time.Duration, error) {
+ if f == nil {
+ return 0, fmt.Errorf("No flag defined")
+ }
+ s := f.Value.String()
+ if s == "" {
+ return 0, nil
+ }
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return 0, err
+ }
+ if d <= 0 {
+ return 0, fmt.Errorf("Interval %#v must be positive", f.Name)
+ }
+ return d, nil
+}
diff --git a/vendor/github.com/openshift/imagebuilder/shell_parser.go b/vendor/github.com/openshift/imagebuilder/shell_parser.go
new file mode 100644
index 000000000..65f1db6dc
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/shell_parser.go
@@ -0,0 +1,314 @@
+package imagebuilder
+
+// This will take a single word and an array of env variables and
+// process all quotes (" and ') as well as $xxx and ${xxx} env variable
+// tokens. Tries to mimic bash shell process.
+// It doesn't support all flavors of ${xx:...} formats but new ones can
+// be added by adding code to the "special ${} format processing" section
+
+import (
+ "fmt"
+ "strings"
+ "text/scanner"
+ "unicode"
+)
+
// shellWord holds the state for processing one word: the original
// text, a rune scanner over it, and the environment used for $var
// substitution.
type shellWord struct {
	word string
	scanner scanner.Scanner
	envs []string
	// pos is initialized by callers but not otherwise read in the
	// visible code paths — possibly vestigial.
	pos int
}
+
// ProcessWord will use the 'env' list of environment variables,
// and replace any env var references in 'word'.
func ProcessWord(word string, env []string) (string, error) {
	sw := &shellWord{
		word: word,
		envs: env,
		pos: 0,
	}
	sw.scanner.Init(strings.NewReader(word))
	// Only the joined result is needed here; the word split is discarded.
	word, _, err := sw.process()
	return word, err
}
+
// ProcessWords will use the 'env' list of environment variables,
// and replace any env var references in 'word' then it will also
// return a slice of strings which represents the 'word'
// split up based on spaces - taking into account quotes. Note that
// this splitting is done **after** the env var substitutions are done.
// Note, each one is trimmed to remove leading and trailing spaces (unless
// they are quoted", but ProcessWord retains spaces between words.
func ProcessWords(word string, env []string) ([]string, error) {
	sw := &shellWord{
		word: word,
		envs: env,
		pos: 0,
	}
	sw.scanner.Init(strings.NewReader(word))
	// Only the split words are needed here; the joined result is discarded.
	_, words, err := sw.process()
	return words, err
}
+
// process consumes the entire word (no early terminating character).
func (sw *shellWord) process() (string, []string, error) {
	return sw.processStopOn(scanner.EOF)
}
+
// wordsStruct accumulates characters into whitespace-separated words,
// with "raw" additions (from quoted/substituted text) that never split.
type wordsStruct struct {
	word string
	words []string
	inWord bool
}
+
+func (w *wordsStruct) addChar(ch rune) {
+ if unicode.IsSpace(ch) && w.inWord {
+ if len(w.word) != 0 {
+ w.words = append(w.words, w.word)
+ w.word = ""
+ w.inWord = false
+ }
+ } else if !unicode.IsSpace(ch) {
+ w.addRawChar(ch)
+ }
+}
+
// addRawChar appends ch to the current word unconditionally, treating
// it as quoted content (whitespace does not split).
func (w *wordsStruct) addRawChar(ch rune) {
	w.word += string(ch)
	w.inWord = true
}
+
// addString feeds each rune of str through addChar, so unquoted
// whitespace within str splits words.
func (w *wordsStruct) addString(str string) {
	var scan scanner.Scanner
	scan.Init(strings.NewReader(str))
	for scan.Peek() != scanner.EOF {
		w.addChar(scan.Next())
	}
}
+
// addRawString appends str to the current word verbatim (no splitting).
func (w *wordsStruct) addRawString(str string) {
	w.word += str
	w.inWord = true
}
+
// getWords flushes any in-progress word and returns the accumulated list.
func (w *wordsStruct) getWords() []string {
	if len(w.word) > 0 {
		w.words = append(w.words, w.word)

		// Just in case we're called again by mistake
		w.word = ""
		w.inWord = false
	}
	return w.words
}
+
// Process the word, starting at 'pos', and stop when we get to the
// end of the word or the 'stopChar' character. Returns the processed
// text, the processed text split into words, and any error.
func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
	var result string
	var words wordsStruct

	// Characters that trigger special handling; everything else is
	// copied through (with '\' escaping the following character).
	var charFuncMapping = map[rune]func() (string, error){
		'\'': sw.processSingleQuote,
		'"': sw.processDoubleQuote,
		'$': sw.processDollar,
	}

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()

		if stopChar != scanner.EOF && ch == stopChar {
			// Consume the stop character and finish.
			sw.scanner.Next()
			break
		}
		if fn, ok := charFuncMapping[ch]; ok {
			// Call special processing func for certain chars
			tmp, err := fn()
			if err != nil {
				return "", []string{}, err
			}
			result += tmp

			if ch == rune('$') {
				// Substituted text may contain spaces that split words.
				words.addString(tmp)
			} else {
				// Quoted text never splits.
				words.addRawString(tmp)
			}
		} else {
			// Not special, just add it to the result
			ch = sw.scanner.Next()

			if ch == '\\' {
				// '\' escapes, except end of line

				ch = sw.scanner.Next()

				if ch == scanner.EOF {
					break
				}

				words.addRawChar(ch)
			} else {
				words.addChar(ch)
			}

			result += string(ch)
		}
	}

	return result, words.getWords(), nil
}
+
// processSingleQuote consumes a single-quoted span and returns its
// contents. All chars between single quotes are taken as-is.
// Note, you can't escape '.
func (sw *shellWord) processSingleQuote() (string, error) {
	var result string

	// Skip the opening quote.
	sw.scanner.Next()

	for {
		ch := sw.scanner.Next()
		if ch == '\'' || ch == scanner.EOF {
			break
		}
		result += string(ch)
	}

	return result, nil
}
+
// processDoubleQuote consumes a double-quoted span. All chars up to the
// next " are taken as-is, even ', except any $ chars (which are
// substituted). You can escape " and $ with a \.
func (sw *shellWord) processDoubleQuote() (string, error) {
	var result string

	// Skip the opening quote.
	sw.scanner.Next()

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()
		if ch == '"' {
			// Closing quote: consume it and finish.
			sw.scanner.Next()
			break
		}
		if ch == '$' {
			tmp, err := sw.processDollar()
			if err != nil {
				return "", err
			}
			result += tmp
		} else {
			ch = sw.scanner.Next()
			if ch == '\\' {
				chNext := sw.scanner.Peek()

				if chNext == scanner.EOF {
					// Ignore \ at end of word
					continue
				}

				if chNext == '"' || chNext == '$' {
					// \" and \$ can be escaped, all other \'s are left as-is
					ch = sw.scanner.Next()
				}
			}
			result += string(ch)
		}
	}

	return result, nil
}
+
// processDollar handles $name and ${name[:(+|-)word]} substitutions.
// The ':+' modifier yields word when the variable is set and non-empty;
// ':-' yields word when it is unset or empty. Other modifiers error.
func (sw *shellWord) processDollar() (string, error) {
	// Skip the '$'.
	sw.scanner.Next()
	ch := sw.scanner.Peek()
	if ch == '{' {
		sw.scanner.Next()
		name := sw.processName()
		ch = sw.scanner.Peek()
		if ch == '}' {
			// Normal ${xx} case
			sw.scanner.Next()
			return sw.getEnv(name), nil
		}
		if ch == ':' {
			// Special ${xx:...} format processing
			// Yes it allows for recursive $'s in the ... spot

			sw.scanner.Next() // skip over :
			modifier := sw.scanner.Next()

			// Consume (and substitute within) the word up to '}'.
			word, _, err := sw.processStopOn('}')
			if err != nil {
				return "", err
			}

			// Grab the current value of the variable in question so we
			// can use it to determine what to do based on the modifier
			newValue := sw.getEnv(name)

			switch modifier {
			case '+':
				if newValue != "" {
					newValue = word
				}
				return newValue, nil

			case '-':
				if newValue == "" {
					newValue = word
				}
				return newValue, nil

			default:
				return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
			}
		}
		return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
	}
	// $xxx case
	name := sw.processName()
	if name == "" {
		// A lone '$' is passed through literally.
		return "$", nil
	}
	return sw.getEnv(name), nil
}
+
// processName reads a variable name (letters, digits or _) from the
// scanner. If the name starts with a digit, only that single digit is
// consumed and returned (positional-parameter style, e.g. $1).
func (sw *shellWord) processName() string {
	var name string

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()
		if len(name) == 0 && unicode.IsDigit(ch) {
			ch = sw.scanner.Next()
			return string(ch)
		}
		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
			break
		}
		ch = sw.scanner.Next()
		name += string(ch)
	}

	return name
}
+
+func (sw *shellWord) getEnv(name string) string {
+ for _, env := range sw.envs {
+ i := strings.Index(env, "=")
+ if i < 0 {
+ if name == env {
+ // Should probably never get here, but just in case treat
+ // it like "var" and "var=" are the same
+ return ""
+ }
+ continue
+ }
+ if name != env[:i] {
+ continue
+ }
+ return env[i+1:]
+ }
+ return ""
+}
diff --git a/vendor/github.com/openshift/imagebuilder/signal/README.md b/vendor/github.com/openshift/imagebuilder/signal/README.md
new file mode 100644
index 000000000..2b237a594
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file
diff --git a/vendor/github.com/openshift/imagebuilder/signal/signal.go b/vendor/github.com/openshift/imagebuilder/signal/signal.go
new file mode 100644
index 000000000..46493965d
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/signal.go
@@ -0,0 +1,25 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
// CheckSignal translates a string to a valid syscall signal.
// It returns an error if the signal map doesn't include the given signal.
func CheckSignal(rawSignal string) error {
	s, err := strconv.Atoi(rawSignal)
	if err == nil {
		// Numeric signal: only 0 is rejected.
		// NOTE(review): negative or out-of-range numbers are accepted
		// here — confirm whether that is intentional.
		if s == 0 {
			return fmt.Errorf("Invalid signal: %s", rawSignal)
		}
		return nil
	}
	// Named signal: match case-insensitively, with or without a "SIG" prefix.
	if _, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]; !ok {
		return fmt.Errorf("Invalid signal: %s", rawSignal)
	}
	return nil
}
diff --git a/vendor/github.com/openshift/imagebuilder/signal/signals.go b/vendor/github.com/openshift/imagebuilder/signal/signals.go
new file mode 100644
index 000000000..41d6fbd95
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/signal/signals.go
@@ -0,0 +1,79 @@
+package signal
+
+// SignalMap is a map of supported signals.
+var SignalMap = map[string]struct{}{
+ "ABRT": {},
+ "ALRM": {},
+ "BUS": {},
+ "CHLD": {},
+ "CLD": {},
+ "CONT": {},
+ "FPE": {},
+ "HUP": {},
+ "ILL": {},
+ "INT": {},
+ "IO": {},
+ "IOT": {},
+ "KILL": {},
+ "PIPE": {},
+ "POLL": {},
+ "PROF": {},
+ "PWR": {},
+ "QUIT": {},
+ "SEGV": {},
+ "STKFLT": {},
+ "STOP": {},
+ "SYS": {},
+ "TERM": {},
+ "TRAP": {},
+ "TSTP": {},
+ "TTIN": {},
+ "TTOU": {},
+ "UNUSED": {},
+ "URG": {},
+ "USR1": {},
+ "USR2": {},
+ "VTALRM": {},
+ "WINCH": {},
+ "XCPU": {},
+ "XFSZ": {},
+ "RTMIN": {},
+ "RTMIN+1": {},
+ "RTMIN+2": {},
+ "RTMIN+3": {},
+ "RTMIN+4": {},
+ "RTMIN+5": {},
+ "RTMIN+6": {},
+ "RTMIN+7": {},
+ "RTMIN+8": {},
+ "RTMIN+9": {},
+ "RTMIN+10": {},
+ "RTMIN+11": {},
+ "RTMIN+12": {},
+ "RTMIN+13": {},
+ "RTMIN+14": {},
+ "RTMIN+15": {},
+ "RTMAX-14": {},
+ "RTMAX-13": {},
+ "RTMAX-12": {},
+ "RTMAX-11": {},
+ "RTMAX-10": {},
+ "RTMAX-9": {},
+ "RTMAX-8": {},
+ "RTMAX-7": {},
+ "RTMAX-6": {},
+ "RTMAX-5": {},
+ "RTMAX-4": {},
+ "RTMAX-3": {},
+ "RTMAX-2": {},
+ "RTMAX-1": {},
+ "RTMAX": {},
+
+ "BUG": {},
+ "EMT": {},
+ "INFO": {},
+
+ "BUF": {},
+ "LWP": {},
+ "THR": {},
+}
diff --git a/vendor/github.com/openshift/imagebuilder/strslice/strslice.go b/vendor/github.com/openshift/imagebuilder/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/openshift/imagebuilder/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (s *StrSlice) UnmarshalJSON(b []byte) error {
	// Empty input deliberately leaves *s untouched, so a caller can
	// pre-load a default value that survives decoding.
	if len(b) == 0 {
		return nil
	}

	var values []string
	if arrayErr := json.Unmarshal(b, &values); arrayErr != nil {
		// Not a JSON array — fall back to a single JSON string and
		// wrap it in a one-element slice.
		var single string
		if strErr := json.Unmarshal(b, &single); strErr != nil {
			return strErr
		}
		values = []string{single}
	}

	*s = values
	return nil
}