author     OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>   2020-09-15 12:17:12 +0200
committer  GitHub <noreply@github.com>                                              2020-09-15 12:17:12 +0200
commit     3b4ad9a81d7c2d194f4d3f2686d47d78857b65d7 (patch)
tree       fc635fda5032a7054f562025b4152ac54ba20f85
parent     d5db2af340b94b9900cf194e35d57af40a695fe1 (diff)
parent     146c68f3acdc01f393a6cfadf9bc98eec3e8de94 (diff)
Merge pull request #7452 from jwhonce/issues/7136
Refactor API build endpoint to be more compliant
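
For context, the refactored compat build endpoint no longer buffers all build output and returns it in a single response; it streams one JSON object per message ({"stream": ...} for progress text, {"error": ...} on failure) and closes the stream with the image ID (a Docker-style "Successfully built ..." line for compat clients). Below is a minimal client-side sketch of consuming that stream, mirroring the decode loop added in pkg/bindings/images/build.go; the helper name readBuildStream is illustrative and not part of this change.

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
        "io"
    )

    // readBuildStream decodes the JSON messages emitted by the refactored
    // /build endpoint. Each message is either {"stream": ...} with progress
    // text (the final one carries the image ID) or {"error": ...} on failure.
    // Illustrative helper; it mirrors pkg/bindings/images/build.go below.
    func readBuildStream(body io.Reader) error {
        dec := json.NewDecoder(body)
        for {
            var msg struct {
                Stream string `json:"stream,omitempty"`
                Error  string `json:"error,omitempty"`
            }
            if err := dec.Decode(&msg); err != nil {
                if errors.Is(err, io.EOF) {
                    return nil // stream exhausted, build finished
                }
                return err
            }
            if msg.Error != "" {
                return errors.New(msg.Error) // server reported a build failure
            }
            fmt.Print(msg.Stream) // progress output, then the image ID
        }
    }

Streaming through channel.WriteCloser lets the handler multiplex buildah's stdout, aux, and progress writers into the HTTP response while the build is still running.
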
-rw-r--r--  cmd/podman/images/build.go               |   9
-rw-r--r--  cmd/podman/root.go                       |   2
-rw-r--r--  pkg/api/handlers/compat/images_build.go  | 258
-rw-r--r--  pkg/bindings/images/build.go             | 227
-rw-r--r--  pkg/bindings/images/images.go            | 110
-rw-r--r--  pkg/channel/doc.go                       |  17
-rw-r--r--  pkg/channel/writer.go                    |  53
-rw-r--r--  pkg/channelwriter/channelwriter.go       |  34
-rw-r--r--  pkg/domain/infra/abi/images.go           |   1
-rw-r--r--  pkg/domain/infra/tunnel/images.go        |  92
-rw-r--r--  pkg/varlinkapi/images.go                 |   6
-rw-r--r--  pkg/varlinkapi/util.go                   |   6
-rw-r--r--  test/e2e/common_test.go                  |   2
-rw-r--r--  test/e2e/images_test.go                  |   7
-rw-r--r--  test/system/070-build.bats               |  32
15 files changed, 478 insertions(+), 378 deletions(-)
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index ff5c6ec09..00777c48c 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -206,14 +206,9 @@ func build(cmd *cobra.Command, args []string) error {
}
}
- ie, err := registry.NewImageEngine(cmd, args)
- if err != nil {
- return err
- }
-
var logfile *os.File
if cmd.Flag("logfile").Changed {
- logfile, err = os.OpenFile(buildOpts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ logfile, err := os.OpenFile(buildOpts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
if err != nil {
return errors.Errorf("error opening logfile %q: %v", buildOpts.Logfile, err)
}
@@ -225,7 +220,7 @@ func build(cmd *cobra.Command, args []string) error {
return err
}
- _, err = ie.Build(registry.GetContext(), containerFiles, *apiBuildOpts)
+ _, err = registry.ImageEngine().Build(registry.GetContext(), containerFiles, *apiBuildOpts)
return err
}
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 60725b111..d079018a0 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -273,7 +273,6 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
pFlags.StringVar(&opts.RegistriesConf, "registries-conf", "", "Path to a registries.conf to use for image processing")
pFlags.StringVar(&opts.Runroot, "runroot", "", "Path to the 'run directory' where all state information is stored")
pFlags.StringVar(&opts.RuntimePath, "runtime", "", "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc")
- pFlags.StringArrayVar(&opts.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
// -s is deprecated due to conflict with -s on subcommands
pFlags.StringVar(&opts.StorageDriver, "storage-driver", "", "Select which storage driver is used to manage storage of images and containers (default is overlay)")
pFlags.StringArrayVar(&opts.StorageOpts, "storage-opt", []string{}, "Used to pass an option to the storage driver")
@@ -301,6 +300,7 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
// Only create these flags for ABI connections
if !registry.IsRemote() {
+ pFlags.StringArrayVar(&opts.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
pFlags.BoolVar(&useSyslog, "syslog", false, "Output logging information to syslog as well as the console (default false)")
}
}
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 9601f5e18..fbaf8d10a 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -1,7 +1,7 @@
package compat
import (
- "bytes"
+ "context"
"encoding/base64"
"encoding/json"
"fmt"
@@ -18,8 +18,10 @@ import (
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/pkg/api/handlers"
"github.com/containers/podman/v2/pkg/api/handlers/utils"
+ "github.com/containers/podman/v2/pkg/channel"
"github.com/containers/storage/pkg/archive"
"github.com/gorilla/schema"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -47,12 +49,25 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
}
- anchorDir, err := extractTarFile(r)
+ contextDirectory, err := extractTarFile(r)
if err != nil {
utils.InternalServerError(w, err)
return
}
- defer os.RemoveAll(anchorDir)
+
+ defer func() {
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found {
+ if keep, _ := strconv.ParseBool(v); keep {
+ return
+ }
+ }
+ }
+ err := os.RemoveAll(filepath.Dir(contextDirectory))
+ if err != nil {
+ logrus.Warn(errors.Wrapf(err, "failed to remove build scratch directory %q", filepath.Dir(contextDirectory)))
+ }
+ }()
query := struct {
Dockerfile string `schema:"dockerfile"`
@@ -67,10 +82,10 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
ForceRm bool `schema:"forcerm"`
Memory int64 `schema:"memory"`
MemSwap int64 `schema:"memswap"`
- CpuShares uint64 `schema:"cpushares"` //nolint
- CpuSetCpus string `schema:"cpusetcpus"` //nolint
- CpuPeriod uint64 `schema:"cpuperiod"` //nolint
- CpuQuota int64 `schema:"cpuquota"` //nolint
+ CpuShares uint64 `schema:"cpushares"` // nolint
+ CpuSetCpus string `schema:"cpusetcpus"` // nolint
+ CpuPeriod uint64 `schema:"cpuperiod"` // nolint
+ CpuQuota int64 `schema:"cpuquota"` // nolint
BuildArgs string `schema:"buildargs"`
ShmSize int `schema:"shmsize"`
Squash bool `schema:"squash"`
@@ -81,52 +96,32 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Outputs string `schema:"outputs"`
Registry string `schema:"registry"`
}{
- Dockerfile: "Dockerfile",
- Tag: []string{},
- ExtraHosts: "",
- Remote: "",
- Quiet: false,
- NoCache: false,
- CacheFrom: "",
- Pull: false,
- Rm: true,
- ForceRm: false,
- Memory: 0,
- MemSwap: 0,
- CpuShares: 0,
- CpuSetCpus: "",
- CpuPeriod: 0,
- CpuQuota: 0,
- BuildArgs: "",
- ShmSize: 64 * 1024 * 1024,
- Squash: false,
- Labels: "",
- NetworkMode: "",
- Platform: "",
- Target: "",
- Outputs: "",
- Registry: "docker.io",
+ Dockerfile: "Dockerfile",
+ Tag: []string{},
+ Rm: true,
+ ShmSize: 64 * 1024 * 1024,
+ Registry: "docker.io",
}
+
decoder := r.Context().Value("decoder").(*schema.Decoder)
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
- var (
- output string
- additionalNames []string
- )
+ var output string
if len(query.Tag) > 0 {
output = query.Tag[0]
}
+ if _, found := r.URL.Query()["target"]; found {
+ output = query.Target
+ }
+
+ var additionalNames []string
if len(query.Tag) > 1 {
additionalNames = query.Tag[1:]
}
- if _, found := r.URL.Query()["target"]; found {
- output = query.Target
- }
var buildArgs = map[string]string{}
if _, found := r.URL.Query()["buildargs"]; found {
if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil {
@@ -156,95 +151,136 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
}
- // build events will be recorded here
- var (
- buildEvents = []string{}
- progress = bytes.Buffer{}
- )
+ // Channels all mux'ed in select{} below to follow API build protocol
+ stdout := channel.NewWriter(make(chan []byte, 1))
+ defer stdout.Close()
+
+ auxout := channel.NewWriter(make(chan []byte, 1))
+ defer auxout.Close()
+
+ stderr := channel.NewWriter(make(chan []byte, 1))
+ defer stderr.Close()
+
+ reporter := channel.NewWriter(make(chan []byte, 1))
+ defer reporter.Close()
buildOptions := imagebuildah.BuildOptions{
- ContextDirectory: filepath.Join(anchorDir, "build"),
+ ContextDirectory: contextDirectory,
PullPolicy: pullPolicy,
Registry: query.Registry,
IgnoreUnrecognizedInstructions: true,
Quiet: query.Quiet,
Isolation: buildah.IsolationChroot,
- Runtime: "",
- RuntimeArgs: nil,
- TransientMounts: nil,
Compression: archive.Gzip,
Args: buildArgs,
Output: output,
AdditionalTags: additionalNames,
- Log: func(format string, args ...interface{}) {
- buildEvents = append(buildEvents, fmt.Sprintf(format, args...))
- },
- In: nil,
- Out: &progress,
- Err: &progress,
- SignaturePolicyPath: "",
- ReportWriter: &progress,
- OutputFormat: buildah.Dockerv2ImageManifest,
- SystemContext: nil,
- NamespaceOptions: nil,
- ConfigureNetwork: 0,
- CNIPluginPath: "",
- CNIConfigDir: "",
- IDMappingOptions: nil,
- AddCapabilities: nil,
- DropCapabilities: nil,
+ Out: stdout,
+ Err: auxout,
+ ReportWriter: reporter,
+ OutputFormat: buildah.Dockerv2ImageManifest,
CommonBuildOpts: &buildah.CommonBuildOptions{
- AddHost: nil,
- CgroupParent: "",
- CPUPeriod: query.CpuPeriod,
- CPUQuota: query.CpuQuota,
- CPUShares: query.CpuShares,
- CPUSetCPUs: query.CpuSetCpus,
- CPUSetMems: "",
- HTTPProxy: false,
- Memory: query.Memory,
- DNSSearch: nil,
- DNSServers: nil,
- DNSOptions: nil,
- MemorySwap: query.MemSwap,
- LabelOpts: nil,
- SeccompProfilePath: "",
- ApparmorProfile: "",
- ShmSize: strconv.Itoa(query.ShmSize),
- Ulimit: nil,
- Volumes: nil,
+ CPUPeriod: query.CpuPeriod,
+ CPUQuota: query.CpuQuota,
+ CPUShares: query.CpuShares,
+ CPUSetCPUs: query.CpuSetCpus,
+ Memory: query.Memory,
+ MemorySwap: query.MemSwap,
+ ShmSize: strconv.Itoa(query.ShmSize),
},
- DefaultMountsFilePath: "",
- IIDFile: "",
Squash: query.Squash,
Labels: labels,
- Annotations: nil,
- OnBuild: nil,
- Layers: false,
NoCache: query.NoCache,
RemoveIntermediateCtrs: query.Rm,
ForceRmIntermediateCtrs: query.ForceRm,
- BlobDirectory: "",
Target: query.Target,
- Devices: nil,
}
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- id, _, err := runtime.Build(r.Context(), buildOptions, query.Dockerfile)
- if err != nil {
- utils.InternalServerError(w, err)
- return
+ runCtx, cancel := context.WithCancel(context.Background())
+ var imageID string
+ go func() {
+ defer cancel()
+ imageID, _, err = runtime.Build(r.Context(), buildOptions, query.Dockerfile)
+ if err != nil {
+ stderr.Write([]byte(err.Error() + "\n"))
+ }
+ }()
+
+ flush := func() {
+ if flusher, ok := w.(http.Flusher); ok {
+ flusher.Flush()
+ }
}
- // Find image ID that was built...
- utils.WriteResponse(w, http.StatusOK,
- struct {
- Stream string `json:"stream"`
- }{
- Stream: progress.String() + "\n" +
- strings.Join(buildEvents, "\n") +
- fmt.Sprintf("\nSuccessfully built %s\n", id),
- })
+ // Send headers and prime client for stream to come
+ w.WriteHeader(http.StatusOK)
+ w.Header().Add("Content-Type", "application/json")
+ flush()
+
+ var failed bool
+
+ body := w.(io.Writer)
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found {
+ if keep, _ := strconv.ParseBool(v); keep {
+ t, _ := ioutil.TempFile("", "build_*_server")
+ defer t.Close()
+ body = io.MultiWriter(t, w)
+ }
+ }
+ }
+
+ enc := json.NewEncoder(body)
+ enc.SetEscapeHTML(true)
+loop:
+ for {
+ m := struct {
+ Stream string `json:"stream,omitempty"`
+ Error string `json:"error,omitempty"`
+ }{}
+
+ select {
+ case e := <-stdout.Chan():
+ m.Stream = string(e)
+ if err := enc.Encode(m); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-auxout.Chan():
+ m.Stream = string(e)
+ if err := enc.Encode(m); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-reporter.Chan():
+ m.Stream = string(e)
+ if err := enc.Encode(m); err != nil {
+ stderr.Write([]byte(err.Error()))
+ }
+ flush()
+ case e := <-stderr.Chan():
+ failed = true
+ m.Error = string(e)
+ if err := enc.Encode(m); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ case <-runCtx.Done():
+ if !failed {
+ if utils.IsLibpodRequest(r) {
+ m.Stream = imageID
+ } else {
+ m.Stream = fmt.Sprintf("Successfully built %12.12s\n", imageID)
+ }
+ if err := enc.Encode(m); err != nil {
+ logrus.Warnf("Failed to json encode error %q", err.Error())
+ }
+ flush()
+ }
+ break loop
+ }
+ }
}
func extractTarFile(r *http.Request) (string, error) {
@@ -253,10 +289,9 @@ func extractTarFile(r *http.Request) (string, error) {
if err != nil {
return "", err
}
- buildDir := filepath.Join(anchorDir, "build")
path := filepath.Join(anchorDir, "tarBall")
- tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return "", err
}
@@ -265,14 +300,17 @@ func extractTarFile(r *http.Request) (string, error) {
// Content-Length not used as too many existing API clients didn't honor it
_, err = io.Copy(tarBall, r.Body)
r.Body.Close()
-
if err != nil {
return "", fmt.Errorf("failed Request: Unable to copy tar file from request body %s", r.RequestURI)
}
- _, _ = tarBall.Seek(0, 0)
- if err := archive.Untar(tarBall, buildDir, &archive.TarOptions{}); err != nil {
+ buildDir := filepath.Join(anchorDir, "build")
+ err = os.Mkdir(buildDir, 0700)
+ if err != nil {
return "", err
}
- return anchorDir, nil
+
+ _, _ = tarBall.Seek(0, 0)
+ err = archive.Untar(tarBall, buildDir, nil)
+ return buildDir, err
}
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
new file mode 100644
index 000000000..9082670a7
--- /dev/null
+++ b/pkg/bindings/images/build.go
@@ -0,0 +1,227 @@
+package images
+
+import (
+ "archive/tar"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/containers/buildah"
+ "github.com/containers/podman/v2/pkg/bindings"
+ "github.com/containers/podman/v2/pkg/domain/entities"
+ "github.com/docker/go-units"
+ "github.com/hashicorp/go-multierror"
+ jsoniter "github.com/json-iterator/go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// Build creates an image using a containerfile reference
+func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) {
+ params := url.Values{}
+
+ if t := options.Output; len(t) > 0 {
+ params.Set("t", t)
+ }
+ for _, tag := range options.AdditionalTags {
+ params.Add("t", tag)
+ }
+ if options.Quiet {
+ params.Set("q", "1")
+ }
+ if options.NoCache {
+ params.Set("nocache", "1")
+ }
+ // TODO cachefrom
+ if options.PullPolicy == buildah.PullAlways {
+ params.Set("pull", "1")
+ }
+ if options.RemoveIntermediateCtrs {
+ params.Set("rm", "1")
+ }
+ if options.ForceRmIntermediateCtrs {
+ params.Set("forcerm", "1")
+ }
+ if mem := options.CommonBuildOpts.Memory; mem > 0 {
+ params.Set("memory", strconv.Itoa(int(mem)))
+ }
+ if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 {
+ params.Set("memswap", strconv.Itoa(int(memSwap)))
+ }
+ if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 {
+ params.Set("cpushares", strconv.Itoa(int(cpuShares)))
+ }
+ if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 {
+ params.Set("cpusetcpues", cpuSetCpus)
+ }
+ if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 {
+ params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod)))
+ }
+ if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 {
+ params.Set("cpuquota", strconv.Itoa(int(cpuQuota)))
+ }
+ if buildArgs := options.Args; len(buildArgs) > 0 {
+ bArgs, err := jsoniter.MarshalToString(buildArgs)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("buildargs", bArgs)
+ }
+ if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 {
+ shmBytes, err := units.RAMInBytes(shmSize)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("shmsize", strconv.Itoa(int(shmBytes)))
+ }
+ if options.Squash {
+ params.Set("squash", "1")
+ }
+ if labels := options.Labels; len(labels) > 0 {
+ l, err := jsoniter.MarshalToString(labels)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("labels", l)
+ }
+
+ stdout := io.Writer(os.Stdout)
+ if options.Out != nil {
+ stdout = options.Out
+ }
+
+ // TODO network?
+
+ var platform string
+ if OS := options.OS; len(OS) > 0 {
+ platform += OS
+ }
+ if arch := options.Architecture; len(arch) > 0 {
+ platform += "/" + arch
+ }
+ if len(platform) > 0 {
+ params.Set("platform", platform)
+ }
+
+ entries := make([]string, len(containerFiles))
+ copy(entries, containerFiles)
+ entries = append(entries, options.ContextDirectory)
+ tarfile, err := nTar(entries...)
+ if err != nil {
+ return nil, err
+ }
+ defer tarfile.Close()
+ params.Set("dockerfile", filepath.Base(containerFiles[0]))
+
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+
+ if !response.IsSuccess() {
+ return nil, response.Process(err)
+ }
+
+ body := response.Body.(io.Reader)
+ if logrus.IsLevelEnabled(logrus.DebugLevel) {
+ if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found {
+ if keep, _ := strconv.ParseBool(v); keep {
+ t, _ := ioutil.TempFile("", "build_*_client")
+ defer t.Close()
+ body = io.TeeReader(response.Body, t)
+ }
+ }
+ }
+
+ dec := json.NewDecoder(body)
+ re := regexp.MustCompile(`[0-9a-f]{12}`)
+
+ var id string
+ for {
+ var s struct {
+ Stream string `json:"stream,omitempty"`
+ Error string `json:"error,omitempty"`
+ }
+ if err := dec.Decode(&s); err != nil {
+ if errors.Is(err, io.EOF) {
+ return &entities.BuildReport{ID: id}, nil
+ }
+ s.Error = err.Error() + "\n"
+ }
+
+ switch {
+ case s.Stream != "":
+ stdout.Write([]byte(s.Stream))
+ if re.Match([]byte(s.Stream)) {
+ id = s.Stream
+ }
+ case s.Error != "":
+ return nil, errors.New(s.Error)
+ default:
+ return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input")
+ }
+ }
+}
+
+func nTar(sources ...string) (io.ReadCloser, error) {
+ if len(sources) == 0 {
+ return nil, errors.New("No source(s) provided for build")
+ }
+
+ pr, pw := io.Pipe()
+ tw := tar.NewWriter(pw)
+
+ var merr error
+ go func() {
+ defer pw.Close()
+ defer tw.Close()
+
+ for _, src := range sources {
+ s := src
+ err := filepath.Walk(s, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.Mode().IsRegular() || path == s {
+ return nil
+ }
+
+ f, lerr := os.Open(path)
+ if lerr != nil {
+ return lerr
+ }
+
+ name := strings.TrimPrefix(path, s+string(filepath.Separator))
+ hdr, lerr := tar.FileInfoHeader(info, name)
+ if lerr != nil {
+ f.Close()
+ return lerr
+ }
+ hdr.Name = name
+ if lerr := tw.WriteHeader(hdr); lerr != nil {
+ f.Close()
+ return lerr
+ }
+
+ _, cerr := io.Copy(tw, f)
+ f.Close()
+ return cerr
+ })
+ merr = multierror.Append(merr, err)
+ }
+ }()
+ return pr, merr
+}
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index a80c94025..05ab25d5b 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -1,7 +1,6 @@
package images
import (
- "bytes"
"context"
"fmt"
"io"
@@ -9,14 +8,11 @@ import (
"net/url"
"strconv"
- "github.com/containers/buildah"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v2/pkg/api/handlers"
"github.com/containers/podman/v2/pkg/auth"
"github.com/containers/podman/v2/pkg/bindings"
"github.com/containers/podman/v2/pkg/domain/entities"
- "github.com/docker/go-units"
- jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
)
@@ -242,112 +238,6 @@ func Untag(ctx context.Context, nameOrID, tag, repo string) error {
return response.Process(nil)
}
-// Build creates an image using a containerfile reference
-func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions, tarfile io.Reader) (*entities.BuildReport, error) {
- var (
- platform string
- report entities.BuildReport
- )
- conn, err := bindings.GetClient(ctx)
- if err != nil {
- return nil, err
- }
- params := url.Values{}
- params.Set("dockerfile", containerFiles[0])
- if t := options.Output; len(t) > 0 {
- params.Set("t", t)
- }
- for _, tag := range options.AdditionalTags {
- params.Add("t", tag)
- }
- // TODO Remote, Quiet
- if options.NoCache {
- params.Set("nocache", "1")
- }
- // TODO cachefrom
- if options.PullPolicy == buildah.PullAlways {
- params.Set("pull", "1")
- }
- if options.RemoveIntermediateCtrs {
- params.Set("rm", "1")
- }
- if options.ForceRmIntermediateCtrs {
- params.Set("forcerm", "1")
- }
- if mem := options.CommonBuildOpts.Memory; mem > 0 {
- params.Set("memory", strconv.Itoa(int(mem)))
- }
- if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 {
- params.Set("memswap", strconv.Itoa(int(memSwap)))
- }
- if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 {
- params.Set("cpushares", strconv.Itoa(int(cpuShares)))
- }
- if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 {
- params.Set("cpusetcpues", cpuSetCpus)
- }
- if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 {
- params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod)))
- }
- if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 {
- params.Set("cpuquota", strconv.Itoa(int(cpuQuota)))
- }
- if buildArgs := options.Args; len(buildArgs) > 0 {
- bArgs, err := jsoniter.MarshalToString(buildArgs)
- if err != nil {
- return nil, err
- }
- params.Set("buildargs", bArgs)
- }
- if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 {
- shmBytes, err := units.RAMInBytes(shmSize)
- if err != nil {
- return nil, err
- }
- params.Set("shmsize", strconv.Itoa(int(shmBytes)))
- }
- if options.Squash {
- params.Set("squash", "1")
- }
- if labels := options.Labels; len(labels) > 0 {
- l, err := jsoniter.MarshalToString(labels)
- if err != nil {
- return nil, err
- }
- params.Set("labels", l)
- }
-
- // TODO network?
- if OS := options.OS; len(OS) > 0 {
- platform += OS
- }
- if arch := options.Architecture; len(arch) > 0 {
- platform += "/" + arch
- }
- if len(platform) > 0 {
- params.Set("platform", platform)
- }
- // TODO outputs?
-
- response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil)
- if err != nil {
- return nil, err
- }
- var streamReponse []byte
- bb := bytes.NewBuffer(streamReponse)
- if _, err = io.Copy(bb, response.Body); err != nil {
- return nil, err
- }
- var s struct {
- Stream string `json:"stream"`
- }
- if err := jsoniter.UnmarshalFromString(bb.String(), &s); err != nil {
- return nil, err
- }
- fmt.Print(s.Stream)
- return &report, nil
-}
-
// Imports adds the given image to the local image store. This can be done by file and the given reader
// or via the url parameter. Additional metadata can be associated with the image by using the changes and
// message parameters. The image can also be tagged given a reference. One of url OR r must be provided.
diff --git a/pkg/channel/doc.go b/pkg/channel/doc.go
new file mode 100644
index 000000000..656fbddaa
--- /dev/null
+++ b/pkg/channel/doc.go
@@ -0,0 +1,17 @@
+/*
+Package channel provides helper structs/methods/funcs for working with channels
+
+Proxy from an io.Writer to a channel:
+
+ w := channel.NewWriter(make(chan []byte, 10))
+ go func() {
+ w.Write([]byte("Hello, World"))
+ }()
+
+ fmt.Println(string(<-w.Chan()))
+ w.Close()
+
+Use of the constructor is required to initialize the channel.
+Provide a channel of sufficient size to handle messages from writer(s).
+*/
+package channel
diff --git a/pkg/channel/writer.go b/pkg/channel/writer.go
new file mode 100644
index 000000000..dbb38e416
--- /dev/null
+++ b/pkg/channel/writer.go
@@ -0,0 +1,53 @@
+package channel
+
+import (
+ "io"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+// WriteCloser is an io.WriteCloser that that proxies Write() calls to a channel
+// The []byte buffer of the Write() is queued on the channel as one message.
+type WriteCloser interface {
+ io.WriteCloser
+ Chan() <-chan []byte
+}
+
+type writeCloser struct {
+ ch chan []byte
+ mux sync.Mutex
+}
+
+// NewWriter initializes a new channel writer
+func NewWriter(c chan []byte) WriteCloser {
+ return &writeCloser{
+ ch: c,
+ }
+}
+
+// Chan returns the R/O channel behind WriteCloser
+func (w *writeCloser) Chan() <-chan []byte {
+ return w.ch
+}
+
+// Write method for WriteCloser
+func (w *writeCloser) Write(b []byte) (int, error) {
+ if w == nil || w.ch == nil {
+ return 0, errors.New("use channel.NewWriter() to initialize a WriteCloser")
+ }
+
+ w.mux.Lock()
+ buf := make([]byte, len(b))
+ copy(buf, b)
+ w.ch <- buf
+ w.mux.Unlock()
+
+ return len(b), nil
+}
+
+// Close method for WriteCloser
+func (w *writeCloser) Close() error {
+ close(w.ch)
+ return nil
+}
diff --git a/pkg/channelwriter/channelwriter.go b/pkg/channelwriter/channelwriter.go
deleted file mode 100644
index d51400eb3..000000000
--- a/pkg/channelwriter/channelwriter.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package channelwriter
-
-import "github.com/pkg/errors"
-
-// Writer is an io.writer-like object that "writes" to a channel
-// instead of a buffer or file, etc. It is handy for varlink endpoints when
-// needing to handle endpoints that do logging "real-time"
-type Writer struct {
- ByteChannel chan []byte
-}
-
-// NewChannelWriter creates a new channel writer and adds a
-// byte slice channel into it.
-func NewChannelWriter() *Writer {
- byteChannel := make(chan []byte)
- return &Writer{
- ByteChannel: byteChannel,
- }
-}
-
-// Write method for Writer
-func (c *Writer) Write(w []byte) (int, error) {
- if c.ByteChannel == nil {
- return 0, errors.New("channel writer channel cannot be nil")
- }
- c.ByteChannel <- w
- return len(w), nil
-}
-
-// Close method for Writer
-func (c *Writer) Close() error {
- close(c.ByteChannel)
- return nil
-}
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 23aef9573..cc3ec37fb 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -535,6 +535,7 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) {
}
func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
+
id, _, err := ir.Libpod.Build(ctx, opts.BuildOptions, containerFiles...)
if err != nil {
return nil, err
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 185cc2f9a..50b8342a3 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -1,13 +1,9 @@
package tunnel
import (
- "archive/tar"
- "bytes"
"context"
- "io"
"io/ioutil"
"os"
- "path/filepath"
"strings"
"time"
@@ -18,9 +14,7 @@ import (
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/utils"
utils2 "github.com/containers/podman/v2/utils"
- "github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
@@ -311,28 +305,8 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) {
return config.Default()
}
-func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
- var tarReader io.Reader
- tarfile, err := archive.Tar(opts.ContextDirectory, 0)
- if err != nil {
- return nil, err
- }
- tarReader = tarfile
- cwd, err := os.Getwd()
- if err != nil {
- return nil, err
- }
- if cwd != opts.ContextDirectory {
- fn := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
- h.Name = filepath.Join(filepath.Base(opts.ContextDirectory), h.Name)
- return nil, false, false, nil
- }
- tarReader, err = transformArchive(tarfile, false, fn)
- if err != nil {
- return nil, err
- }
- }
- return images.Build(ir.ClientCxt, containerFiles, opts, tarReader)
+func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
+ return images.Build(ir.ClientCxt, containerFiles, opts)
}
func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
@@ -346,65 +320,3 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
return nil, errors.New("not implemented yet")
}
-
-// Sourced from openshift image builder
-
-// TransformFileFunc is given a chance to transform an arbitrary input file.
-type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error)
-
-// filterArchive transforms the provided input archive to a new archive,
-// giving the fn a chance to transform arbitrary files.
-func filterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error {
- tr := tar.NewReader(r)
- tw := tar.NewWriter(w)
-
- var body io.Reader = tr
-
- for {
- h, err := tr.Next()
- if err == io.EOF {
- return tw.Close()
- }
- if err != nil {
- return err
- }
-
- name := h.Name
- data, ok, skip, err := fn(h, tr)
- logrus.Debugf("Transform %q -> %q: data=%t ok=%t skip=%t err=%v", name, h.Name, data != nil, ok, skip, err)
- if err != nil {
- return err
- }
- if skip {
- continue
- }
- if ok {
- h.Size = int64(len(data))
- body = bytes.NewBuffer(data)
- }
- if err := tw.WriteHeader(h); err != nil {
- return err
- }
- if _, err := io.Copy(tw, body); err != nil {
- return err
- }
- }
-}
-
-func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) {
- var cwe error
- pr, pw := io.Pipe()
- go func() {
- if compressed {
- in, err := archive.DecompressStream(r)
- if err != nil {
- cwe = pw.CloseWithError(err)
- return
- }
- r = in
- }
- err := filterArchive(r, pw, fn)
- cwe = pw.CloseWithError(err)
- }()
- return pr, cwe
-}
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 1dcfa2213..4bcf70b0d 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -23,7 +23,7 @@ import (
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/image"
- "github.com/containers/podman/v2/pkg/channelwriter"
+ "github.com/containers/podman/v2/pkg/channel"
"github.com/containers/podman/v2/pkg/util"
iopodman "github.com/containers/podman/v2/pkg/varlink"
"github.com/containers/podman/v2/utils"
@@ -570,7 +570,7 @@ func (i *VarlinkAPI) Commit(call iopodman.VarlinkCall, name, imageName string, c
log []string
mimeType string
)
- output := channelwriter.NewChannelWriter()
+ output := channel.NewWriter(make(chan []byte))
channelClose := func() {
if err := output.Close(); err != nil {
logrus.Errorf("failed to close channel writer: %q", err)
@@ -704,7 +704,7 @@ func (i *VarlinkAPI) PullImage(call iopodman.VarlinkCall, name string, creds iop
if call.WantsMore() {
call.Continues = true
}
- output := channelwriter.NewChannelWriter()
+ output := channel.NewWriter(make(chan []byte))
channelClose := func() {
if err := output.Close(); err != nil {
logrus.Errorf("failed to close channel writer: %q", err)
diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go
index 748d8f9cc..7f96965f0 100644
--- a/pkg/varlinkapi/util.go
+++ b/pkg/varlinkapi/util.go
@@ -11,7 +11,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
- "github.com/containers/podman/v2/pkg/channelwriter"
+ "github.com/containers/podman/v2/pkg/channel"
iopodman "github.com/containers/podman/v2/pkg/varlink"
"github.com/containers/storage/pkg/archive"
)
@@ -201,7 +201,7 @@ func makePsOpts(inOpts iopodman.PsOpts) PsOptions {
// more. it is capable of sending updates as the output writer gets them or append them
// all to a log. the chan error is the error from the libpod call so we can honor
// and error event in that case.
-func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwriter.Writer, reply func(br iopodman.MoreResponse) error) ([]string, error) {
+func forwardOutput(log []string, c chan error, wantsMore bool, output channel.WriteCloser, reply func(br iopodman.MoreResponse) error) ([]string, error) {
done := false
for {
select {
@@ -214,7 +214,7 @@ func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwr
done = true
// if no error is found, we pull what we can from the log writer and
// append it to log string slice
- case line := <-output.ByteChannel:
+ case line := <-output.Chan():
log = append(log, string(line))
// If the end point is being used in more mode, send what we have
if wantsMore {
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index b6bbae15b..2ce3f9760 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -432,7 +432,7 @@ func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers
Expect(err).To(BeNil())
session := p.PodmanNoCache([]string{"build", "--layers=" + layers, "-t", imageName, "--file", dockerfilePath, p.TempDir})
session.Wait(120)
- Expect(session.ExitCode()).To(Equal(0))
+ Expect(session).Should(Exit(0), fmt.Sprintf("BuildImage session output: %q", session.OutputToString()))
}
// PodmanPID execs podman and returns its PID
diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go
index b22964dc1..a615a9f99 100644
--- a/test/e2e/images_test.go
+++ b/test/e2e/images_test.go
@@ -185,6 +185,7 @@ RUN apk update && apk add strace
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
Expect(len(result.OutputToStringArray()) >= 1).To(BeTrue())
+
})
It("podman images workingdir from image", func() {
@@ -226,7 +227,7 @@ WORKDIR /test
result := podmanTest.PodmanNoCache([]string{"image", "list", "-q", "-f", "after=docker.io/library/alpine:latest"})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
- Expect(len(result.OutputToStringArray())).To(Equal(0))
+ Expect(result.OutputToStringArray()).Should(HaveLen(0), "list filter output: %q", result.OutputToString())
})
It("podman images filter dangling", func() {
@@ -236,8 +237,8 @@ WORKDIR /test
podmanTest.BuildImage(dockerfile, "foobar.com/before:latest", "false")
result := podmanTest.Podman([]string{"images", "-q", "-f", "dangling=true"})
result.WaitWithDefaultTimeout()
- Expect(result).Should(Exit(0))
- Expect(len(result.OutputToStringArray())).To(Equal(0))
+ Expect(result).Should(Exit(0), "dangling image output: %q", result.OutputToString())
+ Expect(result.OutputToStringArray()).Should(HaveLen(0), "dangling image output: %q", result.OutputToString())
})
It("podman check for image with sha256: prefix", func() {
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 66f6610ea..e3a139b4f 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -1,4 +1,5 @@
#!/usr/bin/env bats -*- bats -*-
+# shellcheck disable=SC2096
#
# Tests for podman build
#
@@ -6,8 +7,6 @@
load helpers
@test "podman build - basic test" {
- skip_if_remote "FIXME: pending #7136"
-
rand_filename=$(random_string 20)
rand_content=$(random_string 50)
@@ -31,7 +30,7 @@ EOF
}
@test "podman build - global runtime flags test" {
- skip_if_remote "FIXME: pending #7136"
+ skip_if_remote "--runtime-flag flag not supported for remote"
rand_content=$(random_string 50)
@@ -49,11 +48,6 @@ EOF
# Regression from v1.5.0. This test passes fine in v1.5.0, fails in 1.6
@test "podman build - cache (#3920)" {
- skip_if_remote "FIXME: pending #7136, runtime flag is not passing over remote"
- if is_remote && is_rootless; then
- skip "unreliable with podman-remote and rootless; #2972"
- fi
-
# Make an empty test directory, with a subdirectory used for tar
tmpdir=$PODMAN_TMPDIR/build-test
mkdir -p $tmpdir/subtest || die "Could not mkdir $tmpdir/subtest"
@@ -97,8 +91,6 @@ EOF
}
@test "podman build - URLs" {
- skip_if_remote "FIXME: pending #7137"
-
tmpdir=$PODMAN_TMPDIR/build-test
mkdir -p $tmpdir
@@ -118,8 +110,6 @@ EOF
@test "podman build - workdir, cmd, env, label" {
- skip_if_remote "FIXME: pending #7137"
-
tmpdir=$PODMAN_TMPDIR/build-test
mkdir -p $tmpdir
@@ -194,8 +184,15 @@ EOF
build_test
is "${lines[0]}" "$workdir" "container default command: pwd"
is "${lines[1]}" "$s_echo" "container default command: output from echo"
+
is "${lines[2]}" "$s_env1" "container default command: env1"
- is "${lines[3]}" "$s_env2" "container default command: env2"
+
+ if is_remote; then
+ is "${lines[3]}" "this-should-be-overridden-by-env-host" "podman-remote does not send local environment"
+ else
+ is "${lines[3]}" "$s_env2" "container default command: env2"
+ fi
+
is "${lines[4]}" "$s_env3" "container default command: env3 (from envfile)"
is "${lines[5]}" "$s_env4" "container default command: env4 (from cmdline)"
@@ -206,7 +203,12 @@ EOF
printenv http_proxy https_proxy ftp_proxy
is "${lines[0]}" "http-proxy-in-env-file" "env-file overrides env"
is "${lines[1]}" "https-proxy-in-env-file" "env-file sets proxy var"
- is "${lines[2]}" "ftp-proxy-from-env" "ftp-proxy is passed through"
+
+ if is_remote; then
+ is "${lines[2]}" "ftp-proxy-in-image" "podman-remote does not send local environment"
+ else
+ is "${lines[2]}" "ftp-proxy-from-env" "ftp-proxy is passed through"
+ fi
# test that workdir is set for command-line commands also
run_podman run --rm build_test pwd
@@ -271,8 +273,6 @@ Labels.$label_name | $label_value
}
@test "podman build - stdin test" {
- skip_if_remote "FIXME: pending #7136"
-
# Random workdir, and random string to verify build output
workdir=/$(random_string 10)
random_echo=$(random_string 15)