Diffstat (limited to 'pkg')
30 files changed, 849 insertions, 599 deletions
diff --git a/pkg/api/handlers/compat/containers_logs.go b/pkg/api/handlers/compat/containers_logs.go index f6d4a518e..d24b7d959 100644 --- a/pkg/api/handlers/compat/containers_logs.go +++ b/pkg/api/handlers/compat/containers_logs.go @@ -105,6 +105,18 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { var frame strings.Builder header := make([]byte, 8) + + writeHeader := true + // Docker does not write stream headers iff the container has a tty. + if !utils.IsLibpodRequest(r) { + inspectData, err := ctnr.Inspect(false) + if err != nil { + utils.InternalServerError(w, errors.Wrapf(err, "Failed to obtain logs for Container '%s'", name)) + return + } + writeHeader = !inspectData.Config.Tty + } + for line := range logChannel { if _, found := r.URL.Query()["until"]; found { if line.Time.After(until) { @@ -138,10 +150,13 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { } frame.WriteString(line.Msg) - binary.BigEndian.PutUint32(header[4:], uint32(frame.Len())) - if _, err := w.Write(header[0:8]); err != nil { - log.Errorf("unable to write log output header: %q", err) + if writeHeader { + binary.BigEndian.PutUint32(header[4:], uint32(frame.Len())) + if _, err := w.Write(header[0:8]); err != nil { + log.Errorf("unable to write log output header: %q", err) + } } + if _, err := io.WriteString(w, frame.String()); err != nil { log.Errorf("unable to write frame string: %q", err) } diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go index 289bf4a2d..fbb33410f 100644 --- a/pkg/api/handlers/compat/events.go +++ b/pkg/api/handlers/compat/events.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - "sync" "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/events" @@ -113,8 +112,13 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { errorChannel <- runtime.Events(r.Context(), readOpts) }() - var coder *jsoniter.Encoder - var writeHeader sync.Once + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + coder := json.NewEncoder(w) + coder.SetEscapeHTML(true) for stream := true; stream; stream = query.Stream { select { @@ -124,18 +128,6 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { return } case evt := <-eventChannel: - writeHeader.Do(func() { - // Use a sync.Once so that we write the header - // only once. 
- w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - coder = json.NewEncoder(w) - coder.SetEscapeHTML(true) - }) - if evt == nil { continue } diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go index 8765e20ca..c1ba9ca66 100644 --- a/pkg/api/handlers/compat/images.go +++ b/pkg/api/handlers/compat/images.go @@ -205,7 +205,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file")) } } - iid, err := runtime.Import(r.Context(), source, "", query.Changes, "", false) + iid, err := runtime.Import(r.Context(), source, "", "", query.Changes, "", false) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball")) return diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go index 9601f5e18..fbaf8d10a 100644 --- a/pkg/api/handlers/compat/images_build.go +++ b/pkg/api/handlers/compat/images_build.go @@ -1,7 +1,7 @@ package compat import ( - "bytes" + "context" "encoding/base64" "encoding/json" "fmt" @@ -18,8 +18,10 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/channel" "github.com/containers/storage/pkg/archive" "github.com/gorilla/schema" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -47,12 +49,25 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } - anchorDir, err := extractTarFile(r) + contextDirectory, err := extractTarFile(r) if err != nil { utils.InternalServerError(w, err) return } - defer os.RemoveAll(anchorDir) + + defer func() { + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + return + } + } + } + err := os.RemoveAll(filepath.Dir(contextDirectory)) + if err != nil { + logrus.Warn(errors.Wrapf(err, "failed to remove build scratch directory %q", filepath.Dir(contextDirectory))) + } + }() query := struct { Dockerfile string `schema:"dockerfile"` @@ -67,10 +82,10 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { ForceRm bool `schema:"forcerm"` Memory int64 `schema:"memory"` MemSwap int64 `schema:"memswap"` - CpuShares uint64 `schema:"cpushares"` //nolint - CpuSetCpus string `schema:"cpusetcpus"` //nolint - CpuPeriod uint64 `schema:"cpuperiod"` //nolint - CpuQuota int64 `schema:"cpuquota"` //nolint + CpuShares uint64 `schema:"cpushares"` // nolint + CpuSetCpus string `schema:"cpusetcpus"` // nolint + CpuPeriod uint64 `schema:"cpuperiod"` // nolint + CpuQuota int64 `schema:"cpuquota"` // nolint BuildArgs string `schema:"buildargs"` ShmSize int `schema:"shmsize"` Squash bool `schema:"squash"` @@ -81,52 +96,32 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { Outputs string `schema:"outputs"` Registry string `schema:"registry"` }{ - Dockerfile: "Dockerfile", - Tag: []string{}, - ExtraHosts: "", - Remote: "", - Quiet: false, - NoCache: false, - CacheFrom: "", - Pull: false, - Rm: true, - ForceRm: false, - Memory: 0, - MemSwap: 0, - CpuShares: 0, - CpuSetCpus: "", - CpuPeriod: 0, - CpuQuota: 0, - BuildArgs: "", - ShmSize: 64 * 1024 * 1024, - Squash: false, - Labels: "", - NetworkMode: "", - Platform: "", - Target: 
"", - Outputs: "", - Registry: "docker.io", + Dockerfile: "Dockerfile", + Tag: []string{}, + Rm: true, + ShmSize: 64 * 1024 * 1024, + Registry: "docker.io", } + decoder := r.Context().Value("decoder").(*schema.Decoder) if err := decoder.Decode(&query, r.URL.Query()); err != nil { utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err) return } - var ( - output string - additionalNames []string - ) + var output string if len(query.Tag) > 0 { output = query.Tag[0] } + if _, found := r.URL.Query()["target"]; found { + output = query.Target + } + + var additionalNames []string if len(query.Tag) > 1 { additionalNames = query.Tag[1:] } - if _, found := r.URL.Query()["target"]; found { - output = query.Target - } var buildArgs = map[string]string{} if _, found := r.URL.Query()["buildargs"]; found { if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil { @@ -156,95 +151,136 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } - // build events will be recorded here - var ( - buildEvents = []string{} - progress = bytes.Buffer{} - ) + // Channels all mux'ed in select{} below to follow API build protocol + stdout := channel.NewWriter(make(chan []byte, 1)) + defer stdout.Close() + + auxout := channel.NewWriter(make(chan []byte, 1)) + defer auxout.Close() + + stderr := channel.NewWriter(make(chan []byte, 1)) + defer stderr.Close() + + reporter := channel.NewWriter(make(chan []byte, 1)) + defer reporter.Close() buildOptions := imagebuildah.BuildOptions{ - ContextDirectory: filepath.Join(anchorDir, "build"), + ContextDirectory: contextDirectory, PullPolicy: pullPolicy, Registry: query.Registry, IgnoreUnrecognizedInstructions: true, Quiet: query.Quiet, Isolation: buildah.IsolationChroot, - Runtime: "", - RuntimeArgs: nil, - TransientMounts: nil, Compression: archive.Gzip, Args: buildArgs, Output: output, AdditionalTags: additionalNames, - Log: func(format string, args ...interface{}) { - buildEvents = append(buildEvents, fmt.Sprintf(format, args...)) - }, - In: nil, - Out: &progress, - Err: &progress, - SignaturePolicyPath: "", - ReportWriter: &progress, - OutputFormat: buildah.Dockerv2ImageManifest, - SystemContext: nil, - NamespaceOptions: nil, - ConfigureNetwork: 0, - CNIPluginPath: "", - CNIConfigDir: "", - IDMappingOptions: nil, - AddCapabilities: nil, - DropCapabilities: nil, + Out: stdout, + Err: auxout, + ReportWriter: reporter, + OutputFormat: buildah.Dockerv2ImageManifest, CommonBuildOpts: &buildah.CommonBuildOptions{ - AddHost: nil, - CgroupParent: "", - CPUPeriod: query.CpuPeriod, - CPUQuota: query.CpuQuota, - CPUShares: query.CpuShares, - CPUSetCPUs: query.CpuSetCpus, - CPUSetMems: "", - HTTPProxy: false, - Memory: query.Memory, - DNSSearch: nil, - DNSServers: nil, - DNSOptions: nil, - MemorySwap: query.MemSwap, - LabelOpts: nil, - SeccompProfilePath: "", - ApparmorProfile: "", - ShmSize: strconv.Itoa(query.ShmSize), - Ulimit: nil, - Volumes: nil, + CPUPeriod: query.CpuPeriod, + CPUQuota: query.CpuQuota, + CPUShares: query.CpuShares, + CPUSetCPUs: query.CpuSetCpus, + Memory: query.Memory, + MemorySwap: query.MemSwap, + ShmSize: strconv.Itoa(query.ShmSize), }, - DefaultMountsFilePath: "", - IIDFile: "", Squash: query.Squash, Labels: labels, - Annotations: nil, - OnBuild: nil, - Layers: false, NoCache: query.NoCache, RemoveIntermediateCtrs: query.Rm, ForceRmIntermediateCtrs: query.ForceRm, - BlobDirectory: "", Target: query.Target, - Devices: nil, } runtime := r.Context().Value("runtime").(*libpod.Runtime) - id, _, err := 
runtime.Build(r.Context(), buildOptions, query.Dockerfile) - if err != nil { - utils.InternalServerError(w, err) - return + runCtx, cancel := context.WithCancel(context.Background()) + var imageID string + go func() { + defer cancel() + imageID, _, err = runtime.Build(r.Context(), buildOptions, query.Dockerfile) + if err != nil { + stderr.Write([]byte(err.Error() + "\n")) + } + }() + + flush := func() { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } } - // Find image ID that was built... - utils.WriteResponse(w, http.StatusOK, - struct { - Stream string `json:"stream"` - }{ - Stream: progress.String() + "\n" + - strings.Join(buildEvents, "\n") + - fmt.Sprintf("\nSuccessfully built %s\n", id), - }) + // Send headers and prime client for stream to come + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + flush() + + var failed bool + + body := w.(io.Writer) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + t, _ := ioutil.TempFile("", "build_*_server") + defer t.Close() + body = io.MultiWriter(t, w) + } + } + } + + enc := json.NewEncoder(body) + enc.SetEscapeHTML(true) +loop: + for { + m := struct { + Stream string `json:"stream,omitempty"` + Error string `json:"error,omitempty"` + }{} + + select { + case e := <-stdout.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-auxout.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-reporter.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-stderr.Chan(): + failed = true + m.Error = string(e) + if err := enc.Encode(m); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + case <-runCtx.Done(): + if !failed { + if utils.IsLibpodRequest(r) { + m.Stream = imageID + } else { + m.Stream = fmt.Sprintf("Successfully built %12.12s\n", imageID) + } + if err := enc.Encode(m); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + } + break loop + } + } } func extractTarFile(r *http.Request) (string, error) { @@ -253,10 +289,9 @@ func extractTarFile(r *http.Request) (string, error) { if err != nil { return "", err } - buildDir := filepath.Join(anchorDir, "build") path := filepath.Join(anchorDir, "tarBall") - tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return "", err } @@ -265,14 +300,17 @@ func extractTarFile(r *http.Request) (string, error) { // Content-Length not used as too many existing API clients didn't honor it _, err = io.Copy(tarBall, r.Body) r.Body.Close() - if err != nil { return "", fmt.Errorf("failed Request: Unable to copy tar file from request body %s", r.RequestURI) } - _, _ = tarBall.Seek(0, 0) - if err := archive.Untar(tarBall, buildDir, &archive.TarOptions{}); err != nil { + buildDir := filepath.Join(anchorDir, "build") + err = os.Mkdir(buildDir, 0700) + if err != nil { return "", err } - return anchorDir, nil + + _, _ = tarBall.Seek(0, 0) + err = archive.Untar(tarBall, buildDir, nil) + return buildDir, err } diff --git a/pkg/api/handlers/compat/ping.go b/pkg/api/handlers/compat/ping.go index eb7eed5b6..06150bb63 100644 --- 
a/pkg/api/handlers/compat/ping.go +++ b/pkg/api/handlers/compat/ping.go @@ -5,7 +5,6 @@ import ( "net/http" "github.com/containers/buildah" - "github.com/containers/podman/v2/pkg/api/handlers/utils" ) // Ping returns headers to client about the service @@ -14,13 +13,12 @@ import ( // Clients will use the Header availability to test which backend engine is in use. // Note: Additionally handler supports GET and HEAD methods func Ping(w http.ResponseWriter, r *http.Request) { - w.Header().Set("API-Version", utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion].String()) + // Note API-Version and Libpod-API-Version are set in handler_api.go w.Header().Set("BuildKit-Version", "") w.Header().Set("Docker-Experimental", "true") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Pragma", "no-cache") - w.Header().Set("Libpod-API-Version", utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String()) w.Header().Set("Libpod-Buildha-Version", buildah.Version) w.WriteHeader(http.StatusOK) diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go index e12c7cefa..92900b75d 100644 --- a/pkg/api/handlers/compat/version.go +++ b/pkg/api/handlers/compat/version.go @@ -30,6 +30,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "Failed to obtain system memory info")) return } + components := []docker.ComponentVersion{{ Name: "Podman Engine", Version: versionInfo.Version, @@ -46,6 +47,9 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { }, }} + apiVersion := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion] + minVersion := utils.APIVersion[utils.CompatTree][utils.MinimalAPIVersion] + utils.WriteResponse(w, http.StatusOK, entities.ComponentVersion{ Version: docker.Version{ Platform: struct { @@ -53,7 +57,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { }{ Name: fmt.Sprintf("%s/%s/%s-%s", goRuntime.GOOS, goRuntime.GOARCH, infoData.Host.Distribution.Distribution, infoData.Host.Distribution.Version), }, - APIVersion: components[0].Details["APIVersion"], + APIVersion: fmt.Sprintf("%d.%d", apiVersion.Major, apiVersion.Minor), Arch: components[0].Details["Arch"], BuildTime: components[0].Details["BuildTime"], Components: components, @@ -61,7 +65,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { GitCommit: components[0].Details["GitCommit"], GoVersion: components[0].Details["GoVersion"], KernelVersion: components[0].Details["KernelVersion"], - MinAPIVersion: components[0].Details["MinAPIVersion"], + MinAPIVersion: fmt.Sprintf("%d.%d", minVersion.Major, minVersion.Minor), Os: components[0].Details["Os"], Version: components[0].Version, }}) diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go index 85f7903dc..bc1bdc287 100644 --- a/pkg/api/handlers/libpod/images.go +++ b/pkg/api/handlers/libpod/images.go @@ -11,8 +11,6 @@ import ( "strings" "github.com/containers/buildah" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod" @@ -25,7 +23,6 @@ import ( "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/containers/podman/v2/pkg/errorhandling" - "github.com/containers/podman/v2/pkg/util" utils2 "github.com/containers/podman/v2/utils" 
"github.com/gorilla/schema" "github.com/pkg/errors" @@ -391,7 +388,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) { tmpfile.Close() source = tmpfile.Name() } - importedImage, err := runtime.Import(context.Background(), source, query.Reference, query.Changes, query.Message, true) + importedImage, err := runtime.Import(context.Background(), source, query.Reference, "", query.Changes, query.Message, true) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to import image")) return @@ -400,123 +397,6 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) { utils.WriteResponse(w, http.StatusOK, entities.ImageImportReport{Id: importedImage}) } -// ImagesPull is the v2 libpod endpoint for pulling images. Note that the -// mandatory `reference` must be a reference to a registry (i.e., of docker -// transport or be normalized to one). Other transports are rejected as they -// do not make sense in a remote context. -func ImagesPull(w http.ResponseWriter, r *http.Request) { - runtime := r.Context().Value("runtime").(*libpod.Runtime) - decoder := r.Context().Value("decoder").(*schema.Decoder) - query := struct { - Reference string `schema:"reference"` - OverrideOS string `schema:"overrideOS"` - OverrideArch string `schema:"overrideArch"` - OverrideVariant string `schema:"overrideVariant"` - TLSVerify bool `schema:"tlsVerify"` - AllTags bool `schema:"allTags"` - }{ - TLSVerify: true, - } - - if err := decoder.Decode(&query, r.URL.Query()); err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) - return - } - - if len(query.Reference) == 0 { - utils.InternalServerError(w, errors.New("reference parameter cannot be empty")) - return - } - - imageRef, err := utils.ParseDockerReference(query.Reference) - if err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "image destination %q is not a docker-transport reference", query.Reference)) - return - } - - // Trim the docker-transport prefix. 
- rawImage := strings.TrimPrefix(query.Reference, fmt.Sprintf("%s://", docker.Transport.Name())) - - // all-tags doesn't work with a tagged reference, so let's check early - namedRef, err := reference.Parse(rawImage) - if err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "error parsing reference %q", rawImage)) - return - } - if _, isTagged := namedRef.(reference.Tagged); isTagged && query.AllTags { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Errorf("reference %q must not have a tag for all-tags", rawImage)) - return - } - - authConf, authfile, err := auth.GetCredentials(r) - if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) - return - } - defer auth.RemoveAuthfile(authfile) - - // Setup the registry options - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: authConf, - OSChoice: query.OverrideOS, - ArchitectureChoice: query.OverrideArch, - VariantChoice: query.OverrideVariant, - } - if _, found := r.URL.Query()["tlsVerify"]; found { - dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) - } - - sys := runtime.SystemContext() - if sys == nil { - sys = image.GetSystemContext("", authfile, false) - } - dockerRegistryOptions.DockerCertPath = sys.DockerCertPath - sys.DockerAuthConfig = authConf - - // Prepare the images we want to pull - imagesToPull := []string{} - res := []handlers.LibpodImagesPullReport{} - imageName := namedRef.String() - - if !query.AllTags { - imagesToPull = append(imagesToPull, imageName) - } else { - tags, err := docker.GetRepositoryTags(context.Background(), sys, imageRef) - if err != nil { - utils.InternalServerError(w, errors.Wrap(err, "error getting repository tags")) - return - } - for _, tag := range tags { - imagesToPull = append(imagesToPull, fmt.Sprintf("%s:%s", imageName, tag)) - } - } - - // Finally pull the images - for _, img := range imagesToPull { - newImage, err := runtime.ImageRuntime().New( - context.Background(), - img, - "", - authfile, - os.Stderr, - &dockerRegistryOptions, - image.SigningOptions{}, - nil, - util.PullImageAlways) - if err != nil { - utils.InternalServerError(w, err) - return - } - res = append(res, handlers.LibpodImagesPullReport{ID: newImage.ID()}) - } - - utils.WriteResponse(w, http.StatusOK, res) -} - // PushImage is the handler for the compat http endpoint for pushing images. 
func PushImage(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go new file mode 100644 index 000000000..8a2f4f4cf --- /dev/null +++ b/pkg/api/handlers/libpod/images_pull.go @@ -0,0 +1,193 @@ +package libpod + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/libpod/image" + "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/channel" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/util" + "github.com/gorilla/schema" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ImagesPull is the v2 libpod endpoint for pulling images. Note that the +// mandatory `reference` must be a reference to a registry (i.e., of docker +// transport or be normalized to one). Other transports are rejected as they +// do not make sense in a remote context. +func ImagesPull(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Reference string `schema:"reference"` + OverrideOS string `schema:"overrideOS"` + OverrideArch string `schema:"overrideArch"` + OverrideVariant string `schema:"overrideVariant"` + TLSVerify bool `schema:"tlsVerify"` + AllTags bool `schema:"allTags"` + }{ + TLSVerify: true, + } + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + + if len(query.Reference) == 0 { + utils.InternalServerError(w, errors.New("reference parameter cannot be empty")) + return + } + + imageRef, err := utils.ParseDockerReference(query.Reference) + if err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "image destination %q is not a docker-transport reference", query.Reference)) + return + } + + // Trim the docker-transport prefix. 
+ rawImage := strings.TrimPrefix(query.Reference, fmt.Sprintf("%s://", docker.Transport.Name())) + + // all-tags doesn't work with a tagged reference, so let's check early + namedRef, err := reference.Parse(rawImage) + if err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "error parsing reference %q", rawImage)) + return + } + if _, isTagged := namedRef.(reference.Tagged); isTagged && query.AllTags { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Errorf("reference %q must not have a tag for all-tags", rawImage)) + return + } + + authConf, authfile, err := auth.GetCredentials(r) + if err != nil { + utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) + return + } + defer auth.RemoveAuthfile(authfile) + + // Setup the registry options + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerRegistryCreds: authConf, + OSChoice: query.OverrideOS, + ArchitectureChoice: query.OverrideArch, + VariantChoice: query.OverrideVariant, + } + if _, found := r.URL.Query()["tlsVerify"]; found { + dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) + } + + sys := runtime.SystemContext() + if sys == nil { + sys = image.GetSystemContext("", authfile, false) + } + dockerRegistryOptions.DockerCertPath = sys.DockerCertPath + sys.DockerAuthConfig = authConf + + // Prepare the images we want to pull + imagesToPull := []string{} + imageName := namedRef.String() + + if !query.AllTags { + imagesToPull = append(imagesToPull, imageName) + } else { + tags, err := docker.GetRepositoryTags(context.Background(), sys, imageRef) + if err != nil { + utils.InternalServerError(w, errors.Wrap(err, "error getting repository tags")) + return + } + for _, tag := range tags { + imagesToPull = append(imagesToPull, fmt.Sprintf("%s:%s", imageName, tag)) + } + } + + writer := channel.NewWriter(make(chan []byte, 1)) + defer writer.Close() + + stderr := channel.NewWriter(make(chan []byte, 1)) + defer stderr.Close() + + images := make([]string, 0, len(imagesToPull)) + runCtx, cancel := context.WithCancel(context.Background()) + go func(imgs []string) { + defer cancel() + // Finally pull the images + for _, img := range imgs { + newImage, err := runtime.ImageRuntime().New( + runCtx, + img, + "", + authfile, + writer, + &dockerRegistryOptions, + image.SigningOptions{}, + nil, + util.PullImageAlways) + if err != nil { + stderr.Write([]byte(err.Error() + "\n")) + } else { + images = append(images, newImage.ID()) + } + } + }(imagesToPull) + + flush := func() { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + flush() + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(true) + var failed bool +loop: // break out of for/select infinite loop + for { + var report entities.ImagePullReport + select { + case e := <-writer.Chan(): + report.Stream = string(e) + if err := enc.Encode(report); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-stderr.Chan(): + failed = true + report.Error = string(e) + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + case <-runCtx.Done(): + if !failed { + report.Images = images + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", 
err.Error()) + } + flush() + } + break loop // break out of for/select infinite loop + case <-r.Context().Done(): + // Client has closed connection + break loop // break out of for/select infinite loop + } + } +} diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go index 62fdc05dd..517dccad0 100644 --- a/pkg/api/handlers/utils/handler.go +++ b/pkg/api/handlers/utils/handler.go @@ -43,8 +43,8 @@ var ( // clients to shop for the Version they wish to support APIVersion = map[VersionTree]map[VersionLevel]semver.Version{ LibpodTree: { - CurrentAPIVersion: semver.MustParse("1.0.0"), - MinimalAPIVersion: semver.MustParse("1.0.0"), + CurrentAPIVersion: semver.MustParse("2.0.0"), + MinimalAPIVersion: semver.MustParse("2.0.0"), }, CompatTree: { CurrentAPIVersion: semver.MustParse("1.40.0"), diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go index e47b66bb4..f2ce0301b 100644 --- a/pkg/api/server/handler_api.go +++ b/pkg/api/server/handler_api.go @@ -40,6 +40,10 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc { c = context.WithValue(c, "idletracker", s.idleTracker) //nolint r = r.WithContext(c) + v := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion] + w.Header().Set("API-Version", fmt.Sprintf("%d.%d", v.Major, v.Minor)) + w.Header().Set("Libpod-API-Version", utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String()) + h(w, r) } fn(w, r) diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go index 5931c2fc9..b2d2543c4 100644 --- a/pkg/api/server/register_archive.go +++ b/pkg/api/server/register_archive.go @@ -9,7 +9,7 @@ import ( ) func (s *APIServer) registerAchiveHandlers(r *mux.Router) error { - // swagger:operation POST /containers/{name}/archive compat putArchive + // swagger:operation PUT /containers/{name}/archive compat putArchive // --- // summary: Put files into a container // description: Put a tar archive of files into a container @@ -84,9 +84,9 @@ func (s *APIServer) registerAchiveHandlers(r *mux.Router) error { // $ref: "#/responses/NoSuchContainer" // 500: // $ref: "#/responses/InternalError" - r.HandleFunc(VersionedPath("/containers/{name}/archive"), s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost) + r.HandleFunc(VersionedPath("/containers/{name}/archive"), s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPut, http.MethodHead) // Added non version path to URI to support docker non versioned paths - r.HandleFunc("/containers/{name}/archive", s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost) + r.HandleFunc("/containers/{name}/archive", s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPut, http.MethodHead) /* Libpod diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go new file mode 100644 index 000000000..9082670a7 --- /dev/null +++ b/pkg/bindings/images/build.go @@ -0,0 +1,227 @@ +package images + +import ( + "archive/tar" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/containers/buildah" + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/docker/go-units" + "github.com/hashicorp/go-multierror" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Build creates an image using a containerfile reference +func Build(ctx context.Context, containerFiles 
[]string, options entities.BuildOptions) (*entities.BuildReport, error) { + params := url.Values{} + + if t := options.Output; len(t) > 0 { + params.Set("t", t) + } + for _, tag := range options.AdditionalTags { + params.Add("t", tag) + } + if options.Quiet { + params.Set("q", "1") + } + if options.NoCache { + params.Set("nocache", "1") + } + // TODO cachefrom + if options.PullPolicy == buildah.PullAlways { + params.Set("pull", "1") + } + if options.RemoveIntermediateCtrs { + params.Set("rm", "1") + } + if options.ForceRmIntermediateCtrs { + params.Set("forcerm", "1") + } + if mem := options.CommonBuildOpts.Memory; mem > 0 { + params.Set("memory", strconv.Itoa(int(mem))) + } + if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { + params.Set("memswap", strconv.Itoa(int(memSwap))) + } + if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { + params.Set("cpushares", strconv.Itoa(int(cpuShares))) + } + if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 { + params.Set("cpusetcpues", cpuSetCpus) + } + if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 { + params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod))) + } + if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { + params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) + } + if buildArgs := options.Args; len(buildArgs) > 0 { + bArgs, err := jsoniter.MarshalToString(buildArgs) + if err != nil { + return nil, err + } + params.Set("buildargs", bArgs) + } + if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { + shmBytes, err := units.RAMInBytes(shmSize) + if err != nil { + return nil, err + } + params.Set("shmsize", strconv.Itoa(int(shmBytes))) + } + if options.Squash { + params.Set("squash", "1") + } + if labels := options.Labels; len(labels) > 0 { + l, err := jsoniter.MarshalToString(labels) + if err != nil { + return nil, err + } + params.Set("labels", l) + } + + stdout := io.Writer(os.Stdout) + if options.Out != nil { + stdout = options.Out + } + + // TODO network? + + var platform string + if OS := options.OS; len(OS) > 0 { + platform += OS + } + if arch := options.Architecture; len(arch) > 0 { + platform += "/" + arch + } + if len(platform) > 0 { + params.Set("platform", platform) + } + + entries := make([]string, len(containerFiles)) + copy(entries, containerFiles) + entries = append(entries, options.ContextDirectory) + tarfile, err := nTar(entries...) 
+ if err != nil { + return nil, err + } + defer tarfile.Close() + params.Set("dockerfile", filepath.Base(containerFiles[0])) + + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + body := response.Body.(io.Reader) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + t, _ := ioutil.TempFile("", "build_*_client") + defer t.Close() + body = io.TeeReader(response.Body, t) + } + } + } + + dec := json.NewDecoder(body) + re := regexp.MustCompile(`[0-9a-f]{12}`) + + var id string + for { + var s struct { + Stream string `json:"stream,omitempty"` + Error string `json:"error,omitempty"` + } + if err := dec.Decode(&s); err != nil { + if errors.Is(err, io.EOF) { + return &entities.BuildReport{ID: id}, nil + } + s.Error = err.Error() + "\n" + } + + switch { + case s.Stream != "": + stdout.Write([]byte(s.Stream)) + if re.Match([]byte(s.Stream)) { + id = s.Stream + } + case s.Error != "": + return nil, errors.New(s.Error) + default: + return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input") + } + } +} + +func nTar(sources ...string) (io.ReadCloser, error) { + if len(sources) == 0 { + return nil, errors.New("No source(s) provided for build") + } + + pr, pw := io.Pipe() + tw := tar.NewWriter(pw) + + var merr error + go func() { + defer pw.Close() + defer tw.Close() + + for _, src := range sources { + s := src + err := filepath.Walk(s, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() || path == s { + return nil + } + + f, lerr := os.Open(path) + if lerr != nil { + return lerr + } + + name := strings.TrimPrefix(path, s+string(filepath.Separator)) + hdr, lerr := tar.FileInfoHeader(info, name) + if lerr != nil { + f.Close() + return lerr + } + hdr.Name = name + if lerr := tw.WriteHeader(hdr); lerr != nil { + f.Close() + return lerr + } + + _, cerr := io.Copy(tw, f) + f.Close() + return cerr + }) + merr = multierror.Append(merr, err) + } + }() + return pr, merr +} diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go index a80c94025..596491044 100644 --- a/pkg/bindings/images/images.go +++ b/pkg/bindings/images/images.go @@ -1,7 +1,6 @@ package images import ( - "bytes" "context" "fmt" "io" @@ -9,14 +8,11 @@ import ( "net/url" "strconv" - "github.com/containers/buildah" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/auth" "github.com/containers/podman/v2/pkg/bindings" "github.com/containers/podman/v2/pkg/domain/entities" - "github.com/docker/go-units" - jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" ) @@ -242,112 +238,6 @@ func Untag(ctx context.Context, nameOrID, tag, repo string) error { return response.Process(nil) } -// Build creates an image using a containerfile reference -func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions, tarfile io.Reader) (*entities.BuildReport, error) { - var ( - platform string - report entities.BuildReport - ) - conn, err := bindings.GetClient(ctx) - if err != nil { - return nil, err - } - params := url.Values{} - params.Set("dockerfile", 
containerFiles[0]) - if t := options.Output; len(t) > 0 { - params.Set("t", t) - } - for _, tag := range options.AdditionalTags { - params.Add("t", tag) - } - // TODO Remote, Quiet - if options.NoCache { - params.Set("nocache", "1") - } - // TODO cachefrom - if options.PullPolicy == buildah.PullAlways { - params.Set("pull", "1") - } - if options.RemoveIntermediateCtrs { - params.Set("rm", "1") - } - if options.ForceRmIntermediateCtrs { - params.Set("forcerm", "1") - } - if mem := options.CommonBuildOpts.Memory; mem > 0 { - params.Set("memory", strconv.Itoa(int(mem))) - } - if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { - params.Set("memswap", strconv.Itoa(int(memSwap))) - } - if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { - params.Set("cpushares", strconv.Itoa(int(cpuShares))) - } - if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 { - params.Set("cpusetcpues", cpuSetCpus) - } - if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 { - params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod))) - } - if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { - params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) - } - if buildArgs := options.Args; len(buildArgs) > 0 { - bArgs, err := jsoniter.MarshalToString(buildArgs) - if err != nil { - return nil, err - } - params.Set("buildargs", bArgs) - } - if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { - shmBytes, err := units.RAMInBytes(shmSize) - if err != nil { - return nil, err - } - params.Set("shmsize", strconv.Itoa(int(shmBytes))) - } - if options.Squash { - params.Set("squash", "1") - } - if labels := options.Labels; len(labels) > 0 { - l, err := jsoniter.MarshalToString(labels) - if err != nil { - return nil, err - } - params.Set("labels", l) - } - - // TODO network? - if OS := options.OS; len(OS) > 0 { - platform += OS - } - if arch := options.Architecture; len(arch) > 0 { - platform += "/" + arch - } - if len(platform) > 0 { - params.Set("platform", platform) - } - // TODO outputs? - - response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil) - if err != nil { - return nil, err - } - var streamReponse []byte - bb := bytes.NewBuffer(streamReponse) - if _, err = io.Copy(bb, response.Body); err != nil { - return nil, err - } - var s struct { - Stream string `json:"stream"` - } - if err := jsoniter.UnmarshalFromString(bb.String(), &s); err != nil { - return nil, err - } - fmt.Print(s.Stream) - return &report, nil -} - // Imports adds the given image to the local image store. This can be done by file and the given reader // or via the url parameter. Additional metadata can be associated with the image by using the changes and // message parameters. The image can also be tagged given a reference. One of url OR r must be provided. @@ -380,51 +270,6 @@ func Import(ctx context.Context, changes []string, message, reference, u *string return &report, response.Process(&report) } -// Pull is the binding for libpod's v2 endpoints for pulling images. Note that -// `rawImage` must be a reference to a registry (i.e., of docker transport or be -// normalized to one). Other transports are rejected as they do not make sense -// in a remote context. 
-func Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) ([]string, error) { - conn, err := bindings.GetClient(ctx) - if err != nil { - return nil, err - } - params := url.Values{} - params.Set("reference", rawImage) - params.Set("overrideArch", options.OverrideArch) - params.Set("overrideOS", options.OverrideOS) - params.Set("overrideVariant", options.OverrideVariant) - if options.SkipTLSVerify != types.OptionalBoolUndefined { - // Note: we have to verify if skipped is false. - verifyTLS := bool(options.SkipTLSVerify == types.OptionalBoolFalse) - params.Set("tlsVerify", strconv.FormatBool(verifyTLS)) - } - params.Set("allTags", strconv.FormatBool(options.AllTags)) - - // TODO: have a global system context we can pass around (1st argument) - header, err := auth.Header(nil, options.Authfile, options.Username, options.Password) - if err != nil { - return nil, err - } - - response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header) - if err != nil { - return nil, err - } - - reports := []handlers.LibpodImagesPullReport{} - if err := response.Process(&reports); err != nil { - return nil, err - } - - pulledImages := []string{} - for _, r := range reports { - pulledImages = append(pulledImages, r.ID) - } - - return pulledImages, nil -} - // Push is the binding for libpod's v2 endpoints for push images. Note that // `source` must be a referring to an image in the remote's container storage. // The destination must be a reference to a registry (i.e., of docker transport diff --git a/pkg/bindings/images/pull.go b/pkg/bindings/images/pull.go new file mode 100644 index 000000000..261a481a2 --- /dev/null +++ b/pkg/bindings/images/pull.go @@ -0,0 +1,98 @@ +package images + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + + "github.com/containers/image/v5/types" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/hashicorp/go-multierror" +) + +// Pull is the binding for libpod's v2 endpoints for pulling images. Note that +// `rawImage` must be a reference to a registry (i.e., of docker transport or be +// normalized to one). Other transports are rejected as they do not make sense +// in a remote context. Progress reported on stderr +func Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) ([]string, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + params.Set("reference", rawImage) + params.Set("overrideArch", options.OverrideArch) + params.Set("overrideOS", options.OverrideOS) + params.Set("overrideVariant", options.OverrideVariant) + + if options.SkipTLSVerify != types.OptionalBoolUndefined { + // Note: we have to verify if skipped is false. 
+ verifyTLS := bool(options.SkipTLSVerify == types.OptionalBoolFalse) + params.Set("tlsVerify", strconv.FormatBool(verifyTLS)) + } + params.Set("allTags", strconv.FormatBool(options.AllTags)) + + // TODO: have a global system context we can pass around (1st argument) + header, err := auth.Header(nil, options.Authfile, options.Username, options.Password) + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + // Historically pull writes status to stderr + stderr := io.Writer(os.Stderr) + if options.Quiet { + stderr = ioutil.Discard + } + + dec := json.NewDecoder(response.Body) + var images []string + var mErr error + for { + var report entities.ImagePullReport + if err := dec.Decode(&report); err != nil { + if errors.Is(err, io.EOF) { + break + } + report.Error = err.Error() + "\n" + } + + select { + case <-response.Request.Context().Done(): + return images, mErr + default: + // non-blocking select + } + + switch { + case report.Stream != "": + fmt.Fprint(stderr, report.Stream) + case report.Error != "": + mErr = multierror.Append(mErr, errors.New(report.Error)) + case len(report.Images) > 0: + images = report.Images + default: + return images, errors.New("failed to parse pull results stream, unexpected input") + } + + } + return images, mErr +} diff --git a/pkg/channel/doc.go b/pkg/channel/doc.go new file mode 100644 index 000000000..656fbddaa --- /dev/null +++ b/pkg/channel/doc.go @@ -0,0 +1,17 @@ +/* +Package channel provides helper structs/methods/funcs for working with channels + +Proxy from an io.Writer to a channel: + + w := channel.NewWriter(make(chan []byte, 10)) + go func() { + w.Write([]byte("Hello, World")) + }() + + fmt.Println(string(<-w.Chan())) + w.Close() + +Use of the constructor is required to initialize the channel. +Provide a channel of sufficient size to handle messages from writer(s). +*/ +package channel diff --git a/pkg/channel/writer.go b/pkg/channel/writer.go new file mode 100644 index 000000000..dbb38e416 --- /dev/null +++ b/pkg/channel/writer.go @@ -0,0 +1,53 @@ +package channel + +import ( + "io" + "sync" + + "github.com/pkg/errors" +) + +// WriteCloser is an io.WriteCloser that that proxies Write() calls to a channel +// The []byte buffer of the Write() is queued on the channel as one message. 
+type WriteCloser interface { + io.WriteCloser + Chan() <-chan []byte +} + +type writeCloser struct { + ch chan []byte + mux sync.Mutex +} + +// NewWriter initializes a new channel writer +func NewWriter(c chan []byte) WriteCloser { + return &writeCloser{ + ch: c, + } +} + +// Chan returns the R/O channel behind WriteCloser +func (w *writeCloser) Chan() <-chan []byte { + return w.ch +} + +// Write method for WriteCloser +func (w *writeCloser) Write(b []byte) (int, error) { + if w == nil || w.ch == nil { + return 0, errors.New("use channel.NewWriter() to initialize a WriteCloser") + } + + w.mux.Lock() + buf := make([]byte, len(b)) + copy(buf, b) + w.ch <- buf + w.mux.Unlock() + + return len(b), nil +} + +// Close method for WriteCloser +func (w *writeCloser) Close() error { + close(w.ch) + return nil +} diff --git a/pkg/channelwriter/channelwriter.go b/pkg/channelwriter/channelwriter.go deleted file mode 100644 index d51400eb3..000000000 --- a/pkg/channelwriter/channelwriter.go +++ /dev/null @@ -1,34 +0,0 @@ -package channelwriter - -import "github.com/pkg/errors" - -// Writer is an io.writer-like object that "writes" to a channel -// instead of a buffer or file, etc. It is handy for varlink endpoints when -// needing to handle endpoints that do logging "real-time" -type Writer struct { - ByteChannel chan []byte -} - -// NewChannelWriter creates a new channel writer and adds a -// byte slice channel into it. -func NewChannelWriter() *Writer { - byteChannel := make(chan []byte) - return &Writer{ - ByteChannel: byteChannel, - } -} - -// Write method for Writer -func (c *Writer) Write(w []byte) (int, error) { - if c.ByteChannel == nil { - return 0, errors.New("channel writer channel cannot be nil") - } - c.ByteChannel <- w - return len(w), nil -} - -// Close method for Writer -func (c *Writer) Close() error { - close(c.ByteChannel) - return nil -} diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index 2a8133680..d0b738934 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -45,7 +45,7 @@ type Image struct { HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"` } -func (i *Image) Id() string { //nolint +func (i *Image) Id() string { // nolint return i.ID } @@ -70,7 +70,7 @@ type ImageSummary struct { History []string `json:",omitempty"` } -func (i *ImageSummary) Id() string { //nolint +func (i *ImageSummary) Id() string { // nolint return i.ID } @@ -150,7 +150,12 @@ type ImagePullOptions struct { // ImagePullReport is the response from pulling one or more images. type ImagePullReport struct { - Images []string + // Stream used to provide output from c/image + Stream string `json:"stream,omitempty"` + // Error contains text of errors from c/image + Error string `json:"error,omitempty"` + // Images contains the ID's of the images pulled + Images []string `json:"images,omitempty"` } // ImagePushOptions are the arguments for pushing images. @@ -259,16 +264,17 @@ type ImageLoadReport struct { } type ImageImportOptions struct { - Changes []string - Message string - Quiet bool - Reference string - Source string - SourceIsURL bool + Changes []string + Message string + Quiet bool + Reference string + SignaturePolicy string + Source string + SourceIsURL bool } type ImageImportReport struct { - Id string //nolint + Id string // nolint } // ImageSaveOptions provide options for saving images. 
@@ -348,7 +354,7 @@ type ImageUnmountOptions struct { // ImageMountReport describes the response from image mount type ImageMountReport struct { Err error - Id string //nolint + Id string // nolint Name string Repositories []string Path string @@ -357,5 +363,5 @@ type ImageMountReport struct { // ImageUnmountReport describes the response from umounting an image type ImageUnmountReport struct { Err error - Id string //nolint + Id string // nolint } diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 23aef9573..cc62c3f27 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -467,7 +467,7 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) } func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) { - id, err := ir.Libpod.Import(ctx, opts.Source, opts.Reference, opts.Changes, opts.Message, opts.Quiet) + id, err := ir.Libpod.Import(ctx, opts.Source, opts.Reference, opts.SignaturePolicy, opts.Changes, opts.Message, opts.Quiet) if err != nil { return nil, err } @@ -535,6 +535,7 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) { } func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { + id, _, err := ir.Libpod.Build(ctx, opts.BuildOptions, containerFiles...) if err != nil { return nil, err diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go index 7ec84246d..3e47dc67a 100644 --- a/pkg/domain/infra/abi/images_list.go +++ b/pkg/domain/infra/abi/images_list.go @@ -23,33 +23,13 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) summaries := []*entities.ImageSummary{} for _, img := range images { - var repoTags []string - if opts.All { - pairs, err := libpodImage.ReposToMap(img.Names()) - if err != nil { - return nil, err - } - - for repo, tags := range pairs { - for _, tag := range tags { - repoTags = append(repoTags, repo+":"+tag) - } - } - } else { - repoTags, err = img.RepoTags() - if err != nil { - return nil, err - } - } - digests := make([]string, len(img.Digests())) for j, d := range img.Digests() { digests[j] = string(d) } e := entities.ImageSummary{ - ID: img.ID(), - + ID: img.ID(), ConfigDigest: string(img.ConfigDigest), Created: img.Created().Unix(), Dangling: img.Dangling(), @@ -61,7 +41,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ReadOnly: img.IsReadOnly(), SharedSize: 0, VirtualSize: img.VirtualSize, - RepoTags: repoTags, + RepoTags: img.Names(), // may include tags and digests } e.Labels, _ = img.Labels(context.TODO()) diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go index 6dfb52c63..aa6aeede2 100644 --- a/pkg/domain/infra/abi/play.go +++ b/pkg/domain/infra/abi/play.go @@ -132,6 +132,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY libpod.WithInfraContainer(), libpod.WithPodName(podName), } + + if podYAML.ObjectMeta.Labels != nil { + podOptions = append(podOptions, libpod.WithPodLabels(podYAML.ObjectMeta.Labels)) + } + // TODO we only configure Process namespace. 
We also need to account for Host{IPC,Network,PID} // which is not currently possible with pod create if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace { @@ -294,6 +299,18 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY return nil, err } + var ctrRestartPolicy string + switch podYAML.Spec.RestartPolicy { + case v1.RestartPolicyAlways: + ctrRestartPolicy = libpod.RestartPolicyAlways + case v1.RestartPolicyOnFailure: + ctrRestartPolicy = libpod.RestartPolicyOnFailure + case v1.RestartPolicyNever: + ctrRestartPolicy = libpod.RestartPolicyNo + default: // Default to Always + ctrRestartPolicy = libpod.RestartPolicyAlways + } + containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers)) for _, container := range podYAML.Spec.Containers { pullPolicy := util.PullImageMissing @@ -321,6 +338,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY if err != nil { return nil, err } + conf.RestartPolicy = ctrRestartPolicy ctr, err := createconfig.CreateContainerFromCreateConfig(ctx, ic.Libpod, conf, pod) if err != nil { return nil, err diff --git a/pkg/domain/infra/abi/terminal/terminal.go b/pkg/domain/infra/abi/terminal/terminal.go index 0b6e57f49..48f5749d5 100644 --- a/pkg/domain/infra/abi/terminal/terminal.go +++ b/pkg/domain/infra/abi/terminal/terminal.go @@ -6,7 +6,7 @@ import ( "os/signal" lsignal "github.com/containers/podman/v2/pkg/signal" - "github.com/docker/docker/pkg/term" + "github.com/moby/term" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/client-go/tools/remotecommand" diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go index f9b8106ef..26c9c7e2e 100644 --- a/pkg/domain/infra/runtime_libpod.go +++ b/pkg/domain/infra/runtime_libpod.go @@ -227,23 +227,6 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo // TODO flag to set CNI plugins dir? - // TODO I don't think these belong here? 
- // Will follow up with a different PR to address - // - // Pod create options - - infraImageFlag := fs.Lookup("infra-image") - if infraImageFlag != nil && infraImageFlag.Changed { - infraImage, _ := fs.GetString("infra-image") - options = append(options, libpod.WithDefaultInfraImage(infraImage)) - } - - infraCommandFlag := fs.Lookup("infra-command") - if infraCommandFlag != nil && infraImageFlag.Changed { - infraCommand, _ := fs.GetString("infra-command") - options = append(options, libpod.WithDefaultInfraCommand(infraCommand)) - } - if !opts.withFDS { options = append(options, libpod.WithEnableSDNotify()) } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 185cc2f9a..332a7c2eb 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -1,13 +1,9 @@ package tunnel import ( - "archive/tar" - "bytes" "context" - "io" "io/ioutil" "os" - "path/filepath" "strings" "time" @@ -18,9 +14,7 @@ import ( "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/utils" utils2 "github.com/containers/podman/v2/utils" - "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) { @@ -311,28 +305,23 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) { return config.Default() } -func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { - var tarReader io.Reader - tarfile, err := archive.Tar(opts.ContextDirectory, 0) +func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { + report, err := images.Build(ir.ClientCxt, containerFiles, opts) if err != nil { return nil, err } - tarReader = tarfile - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - if cwd != opts.ContextDirectory { - fn := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) { - h.Name = filepath.Join(filepath.Base(opts.ContextDirectory), h.Name) - return nil, false, false, nil - } - tarReader, err = transformArchive(tarfile, false, fn) + // For remote clients, if the option for writing to a file was + // selected, we need to write to the *client's* filesystem. + if len(opts.IIDFile) > 0 { + f, err := os.Create(opts.IIDFile) if err != nil { return nil, err } + if _, err := f.WriteString(report.ID); err != nil { + return nil, err + } } - return images.Build(ir.ClientCxt, containerFiles, opts, tarReader) + return report, nil } func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) { @@ -346,65 +335,3 @@ func (ir *ImageEngine) Shutdown(_ context.Context) { func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) { return nil, errors.New("not implemented yet") } - -// Sourced from openshift image builder - -// TransformFileFunc is given a chance to transform an arbitrary input file. -type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) - -// filterArchive transforms the provided input archive to a new archive, -// giving the fn a chance to transform arbitrary files. 
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index fda4c098c..2ac3b376f 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -95,7 +95,7 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if len(names) > 0 {
imgName = names[0]
}
- options = append(options, libpod.WithRootFSFromImage(newImage.ID(), imgName, s.Image))
+ options = append(options, libpod.WithRootFSFromImage(newImage.ID(), imgName, s.RawImageName))
}
if err := s.Validate(); err != nil {
return nil, errors.Wrap(err, "invalid config provided")
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index 0bd39d5a4..101201252 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -84,6 +84,15 @@ func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod
if len(p.CNINetworks) > 0 {
options = append(options, libpod.WithPodNetworks(p.CNINetworks))
}
+
+ if len(p.InfraImage) > 0 {
+ options = append(options, libpod.WithInfraImage(p.InfraImage))
+ }
+
+ if len(p.InfraCommand) > 0 {
+ options = append(options, libpod.WithInfraCommand(p.InfraCommand))
+ }
+
switch p.NetNS.NSMode {
case specgen.Bridge, specgen.Default, "":
logrus.Debugf("Pod using default network mode")
diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go
index d5e0aecf2..907c0bb69 100644
--- a/pkg/specgen/pod_validate.go
+++ b/pkg/specgen/pod_validate.go
@@ -95,12 +95,5 @@ func (p *PodSpecGenerator) Validate() error {
return exclusivePodOptions("NoManageHosts", "HostAdd")
}

- // Set Defaults
- if len(p.InfraImage) < 1 {
- p.InfraImage = containerConfig.Engine.InfraImage
- }
- if len(p.InfraCommand) < 1 {
- p.InfraCommand = []string{containerConfig.Engine.InfraCommand}
- }
return nil
}
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index cca05eddb..b8f37ec7a 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -89,6 +89,9 @@ type ContainerBasicConfig struct {
// If not given, a default location will be used.
// Optional.
ConmonPidFile string `json:"conmon_pid_file,omitempty"`
+ // RawImageName is the user-specified and unprocessed input referring
+ // to a local or a remote image.
+ RawImageName string `json:"raw_image_name,omitempty"`
// RestartPolicy is the container's restart policy - an action which
// will be taken when the container exits.
// If not given, the default policy, which does nothing, will be used.
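The pod_validate.go and pod_create.go hunks above move infra-image and infra-command handling out of Validate (which used to hard-code defaults from containers.conf) and into createPodOptions, which only appends an option when the spec actually carries a value. The sketch below illustrates that "append only when set" functional-options pattern with trimmed stand-ins; the types, option names, and default values are assumptions, not libpod's real API.

package main

import "fmt"

// Trimmed stand-ins for libpod's pod config and PodCreateOption; the real
// WithInfraImage/WithInfraCommand options live in libpod and are not shown here.
type podConfig struct {
    InfraImage   string
    InfraCommand []string
}

type podCreateOption func(*podConfig)

func withInfraImage(image string) podCreateOption {
    return func(c *podConfig) { c.InfraImage = image }
}

func withInfraCommand(command []string) podCreateOption {
    return func(c *podConfig) { c.InfraCommand = command }
}

// optionsFor mirrors the createPodOptions hunk above: an option is appended
// only when the spec carries a value, so unset fields keep whatever default
// the runtime was configured with instead of being pinned during validation.
func optionsFor(infraImage string, infraCommand []string) []podCreateOption {
    var opts []podCreateOption
    if len(infraImage) > 0 {
        opts = append(opts, withInfraImage(infraImage))
    }
    if len(infraCommand) > 0 {
        opts = append(opts, withInfraCommand(infraCommand))
    }
    return opts
}

func main() {
    // Start from placeholder runtime defaults and apply only what the user
    // set: the infra image keeps its default, the command is overridden.
    cfg := podConfig{InfraImage: "default/infra:latest", InfraCommand: []string{"/default-entrypoint"}}
    for _, opt := range optionsFor("", []string{"/custom-entrypoint"}) {
        opt(&cfg)
    }
    fmt.Printf("%+v\n", cfg)
}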
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 1dcfa2213..4bcf70b0d 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -23,7 +23,7 @@ import (
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/image"
- "github.com/containers/podman/v2/pkg/channelwriter"
+ "github.com/containers/podman/v2/pkg/channel"
"github.com/containers/podman/v2/pkg/util"
iopodman "github.com/containers/podman/v2/pkg/varlink"
"github.com/containers/podman/v2/utils"
@@ -570,7 +570,7 @@ func (i *VarlinkAPI) Commit(call iopodman.VarlinkCall, name, imageName string, c
log []string
mimeType string
)
- output := channelwriter.NewChannelWriter()
+ output := channel.NewWriter(make(chan []byte))
channelClose := func() {
if err := output.Close(); err != nil {
logrus.Errorf("failed to close channel writer: %q", err)
@@ -704,7 +704,7 @@ func (i *VarlinkAPI) PullImage(call iopodman.VarlinkCall, name string, creds iop
if call.WantsMore() {
call.Continues = true
}
- output := channelwriter.NewChannelWriter()
+ output := channel.NewWriter(make(chan []byte))
channelClose := func() {
if err := output.Close(); err != nil {
logrus.Errorf("failed to close channel writer: %q", err)
diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go
index 748d8f9cc..7f96965f0 100644
--- a/pkg/varlinkapi/util.go
+++ b/pkg/varlinkapi/util.go
@@ -11,7 +11,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
- "github.com/containers/podman/v2/pkg/channelwriter"
+ "github.com/containers/podman/v2/pkg/channel"
iopodman "github.com/containers/podman/v2/pkg/varlink"
"github.com/containers/storage/pkg/archive"
)
@@ -201,7 +201,7 @@ func makePsOpts(inOpts iopodman.PsOpts) PsOptions {
// more. it is capable of sending updates as the output writer gets them or append them
// all to a log. the chan error is the error from the libpod call so we can honor
// and error event in that case.
-func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwriter.Writer, reply func(br iopodman.MoreResponse) error) ([]string, error) {
+func forwardOutput(log []string, c chan error, wantsMore bool, output channel.WriteCloser, reply func(br iopodman.MoreResponse) error) ([]string, error) {
done := false
for {
select {
@@ -214,7 +214,7 @@ func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwr
done = true
// if no error is found, we pull what we can from the log writer and
// append it to log string slice
- case line := <-output.ByteChannel:
+ case line := <-output.Chan():
log = append(log, string(line))
// If the end point is being used in more mode, send what we have
if wantsMore {
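The varlinkapi hunks above swap the old pkg/channelwriter type for pkg/channel, constructing the writer over an explicit chan []byte and reading lines back through an accessor (output.Chan()) instead of a public struct field. The sketch below is a simplified stand-in for that kind of channel-backed io.WriteCloser plus a consumer loop roughly shaped like forwardOutput; it is not the actual pkg/channel implementation, and the producer/consumer wiring is an assumed usage pattern.

package main

import (
    "fmt"
    "io"
)

// chanWriter forwards every Write as one message on a byte-slice channel and
// exposes the receive side via Chan so a caller can select on it.
type chanWriter struct {
    ch chan []byte
}

func newChanWriter(ch chan []byte) *chanWriter { return &chanWriter{ch: ch} }

func (w *chanWriter) Write(p []byte) (int, error) {
    buf := make([]byte, len(p)) // copy: the caller may reuse p after Write returns
    copy(buf, p)
    w.ch <- buf
    return len(p), nil
}

func (w *chanWriter) Close() error {
    close(w.ch)
    return nil
}

func (w *chanWriter) Chan() <-chan []byte { return w.ch }

var _ io.WriteCloser = (*chanWriter)(nil)

func main() {
    output := newChanWriter(make(chan []byte))
    errCh := make(chan error, 1)

    // Producer: something writes progress lines, then reports completion.
    go func() {
        fmt.Fprintln(output, "pulling image...")
        fmt.Fprintln(output, "done")
        errCh <- nil
    }()

    // Consumer: roughly the shape of the forwardOutput select loop above.
    var log []string
    for done := false; !done; {
        select {
        case err := <-errCh:
            if err != nil {
                fmt.Println("error:", err)
            }
            done = true
        case line := <-output.Chan():
            log = append(log, string(line))
        }
    }
    fmt.Printf("collected %d line(s): %q\n", len(log), log)
}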