Diffstat (limited to 'pkg'): 83 files changed, 2011 insertions, 996 deletions
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index b1ef08cda..3a904ba87 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -17,6 +17,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/gorilla/schema" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func RemoveContainer(w http.ResponseWriter, r *http.Request) { @@ -44,8 +45,25 @@ func RemoveContainer(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) name := utils.GetName(r) con, err := runtime.LookupContainer(name) - if err != nil { - utils.ContainerNotFound(w, name, err) + if err != nil && errors.Cause(err) == define.ErrNoSuchCtr { + // Failed to get container. If force is specified, get the container's ID + // and evict it + if !query.Force { + utils.ContainerNotFound(w, name, err) + return + } + + if _, err := runtime.EvictContainer(r.Context(), name, query.Vols); err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr { + logrus.Debugf("Ignoring error (--allow-missing): %q", err) + w.WriteHeader(http.StatusNoContent) + return + } + logrus.Warn(errors.Wrapf(err, "Failed to evict container: %q", name)) + utils.InternalServerError(w, err) + return + } + w.WriteHeader(http.StatusNoContent) return } @@ -85,7 +103,7 @@ func ListContainers(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } - if _, found := r.URL.Query()["limit"]; found && query.Limit != -1 { + if _, found := r.URL.Query()["limit"]; found && query.Limit > 0 { last := query.Limit if len(containers) > last { containers = containers[len(containers)-last:] @@ -175,6 +193,7 @@ func KillContainer(w http.ResponseWriter, r *http.Request) { err = con.Kill(signal) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "unable to kill Container %s", name)) + return } // Docker waits for the container to stop if the signal is 0 or diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go index e20d48d86..4a1196c89 100644 --- a/pkg/api/handlers/compat/containers_attach.go +++ b/pkg/api/handlers/compat/containers_attach.go @@ -6,7 +6,7 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/api/handlers/utils" - "github.com/containers/podman/v2/pkg/api/server/idletracker" + "github.com/containers/podman/v2/pkg/api/server/idle" "github.com/gorilla/schema" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -92,7 +92,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) { return } - idleTracker := r.Context().Value("idletracker").(*idletracker.IdleTracker) + idleTracker := r.Context().Value("idletracker").(*idle.Tracker) hijackChan := make(chan bool, 1) // Perform HTTP attach. @@ -109,7 +109,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) { // We do need to tell the idle tracker that the // connection has been closed, though. We can guarantee // that is true after HTTPAttach exits. - idleTracker.TrackHijackedClosed() + idleTracker.Close() } else { // A hijack was not successfully completed. We need to // report the error normally. 
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go index 93e4fe540..0579da8de 100644 --- a/pkg/api/handlers/compat/containers_create.go +++ b/pkg/api/handlers/compat/containers_create.go @@ -82,7 +82,13 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input } } - workDir := "/" + workDir, err := newImage.WorkingDir(ctx) + if err != nil { + return createconfig.CreateConfig{}, err + } + if workDir == "" { + workDir = "/" + } if len(input.WorkingDir) > 0 { workDir = input.WorkingDir } @@ -169,6 +175,11 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input // away incorrectly formatted variables so we cannot reuse the // parsing of the env input // [Foo Other=one Blank=] + imgEnv, err := newImage.Env(ctx) + if err != nil { + return createconfig.CreateConfig{}, err + } + input.Env = append(imgEnv, input.Env...) for _, e := range input.Env { splitEnv := strings.Split(e, "=") switch len(splitEnv) { @@ -210,7 +221,7 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input ImageID: newImage.ID(), BuiltinImgVolumes: nil, // podman ImageVolumeType: "", // podman - Interactive: false, + Interactive: input.OpenStdin, // IpcMode: input.HostConfig.IpcMode, Labels: input.Labels, LogDriver: input.HostConfig.LogConfig.Type, // is this correct diff --git a/pkg/api/handlers/compat/containers_logs.go b/pkg/api/handlers/compat/containers_logs.go index f6d4a518e..d24b7d959 100644 --- a/pkg/api/handlers/compat/containers_logs.go +++ b/pkg/api/handlers/compat/containers_logs.go @@ -105,6 +105,18 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { var frame strings.Builder header := make([]byte, 8) + + writeHeader := true + // Docker does not write stream headers iff the container has a tty. + if !utils.IsLibpodRequest(r) { + inspectData, err := ctnr.Inspect(false) + if err != nil { + utils.InternalServerError(w, errors.Wrapf(err, "Failed to obtain logs for Container '%s'", name)) + return + } + writeHeader = !inspectData.Config.Tty + } + for line := range logChannel { if _, found := r.URL.Query()["until"]; found { if line.Time.After(until) { @@ -138,10 +150,13 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) { } frame.WriteString(line.Msg) - binary.BigEndian.PutUint32(header[4:], uint32(frame.Len())) - if _, err := w.Write(header[0:8]); err != nil { - log.Errorf("unable to write log output header: %q", err) + if writeHeader { + binary.BigEndian.PutUint32(header[4:], uint32(frame.Len())) + if _, err := w.Write(header[0:8]); err != nil { + log.Errorf("unable to write log output header: %q", err) + } } + if _, err := io.WriteString(w, frame.String()); err != nil { log.Errorf("unable to write frame string: %q", err) } diff --git a/pkg/api/handlers/compat/containers_stats.go b/pkg/api/handlers/compat/containers_stats.go index 3d7d49ad3..16bd0518a 100644 --- a/pkg/api/handlers/compat/containers_stats.go +++ b/pkg/api/handlers/compat/containers_stats.go @@ -75,32 +75,48 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) { } } - for ok := true; ok; ok = query.Stream { + // Write header and content type. + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + // Setup JSON encoder for streaming. 
+ coder := json.NewEncoder(w) + coder.SetEscapeHTML(true) + +streamLabel: // A label to flatten the scope + select { + case <-r.Context().Done(): + logrus.Debugf("Client connection (container stats) cancelled") + + default: // Container stats stats, err := ctnr.GetContainerStats(stats) if err != nil { - utils.InternalServerError(w, err) + logrus.Errorf("Unable to get container stats: %v", err) return } inspect, err := ctnr.Inspect(false) if err != nil { - utils.InternalServerError(w, err) + logrus.Errorf("Unable to inspect container: %v", err) return } // Cgroup stats cgroupPath, err := ctnr.CGroupPath() if err != nil { - utils.InternalServerError(w, err) + logrus.Errorf("Unable to get cgroup path of container: %v", err) return } cgroup, err := cgroups.Load(cgroupPath) if err != nil { - utils.InternalServerError(w, err) + logrus.Errorf("Unable to load cgroup: %v", err) return } cgroupStat, err := cgroup.Stat() if err != nil { - utils.InternalServerError(w, err) + logrus.Errorf("Unable to get cgroup stats: %v", err) return } @@ -175,11 +191,18 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) { Networks: net, } - utils.WriteJSON(w, http.StatusOK, s) + if err := coder.Encode(s); err != nil { + logrus.Errorf("Unable to encode stats: %v", err) + return + } if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } + if !query.Stream { + return + } + preRead = s.Read bits, err := json.Marshal(s.CPUStats) if err != nil { @@ -189,10 +212,8 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) { logrus.Errorf("Unable to unmarshal previous stats: %q", err) } - // Only sleep when we're streaming. - if query.Stream { - time.Sleep(DefaultStatsPeriod) - } + time.Sleep(DefaultStatsPeriod) + goto streamLabel } } diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go index 289bf4a2d..fbb33410f 100644 --- a/pkg/api/handlers/compat/events.go +++ b/pkg/api/handlers/compat/events.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - "sync" "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/events" @@ -113,8 +112,13 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { errorChannel <- runtime.Events(r.Context(), readOpts) }() - var coder *jsoniter.Encoder - var writeHeader sync.Once + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + coder := json.NewEncoder(w) + coder.SetEscapeHTML(true) for stream := true; stream; stream = query.Stream { select { @@ -124,18 +128,6 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { return } case evt := <-eventChannel: - writeHeader.Do(func() { - // Use a sync.Once so that we write the header - // only once. 
- w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - coder = json.NewEncoder(w) - coder.SetEscapeHTML(true) - }) - if evt == nil { continue } diff --git a/pkg/api/handlers/compat/exec.go b/pkg/api/handlers/compat/exec.go index 1db950f85..df51293c2 100644 --- a/pkg/api/handlers/compat/exec.go +++ b/pkg/api/handlers/compat/exec.go @@ -10,7 +10,7 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/api/handlers/utils" - "github.com/containers/podman/v2/pkg/api/server/idletracker" + "github.com/containers/podman/v2/pkg/api/server/idle" "github.com/containers/podman/v2/pkg/specgen/generate" "github.com/gorilla/mux" "github.com/pkg/errors" @@ -174,7 +174,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) { return } - idleTracker := r.Context().Value("idletracker").(*idletracker.IdleTracker) + idleTracker := r.Context().Value("idletracker").(*idle.Tracker) hijackChan := make(chan bool, 1) if err := sessionCtr.ExecHTTPStartAndAttach(sessionID, r, w, nil, nil, nil, hijackChan); err != nil { @@ -186,7 +186,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) { // We do need to tell the idle tracker that the // connection has been closed, though. We can guarantee // that is true after HTTPAttach exits. - idleTracker.TrackHijackedClosed() + idleTracker.Close() } else { // A hijack was not successfully completed. We need to // report the error normally. diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go index 8765e20ca..940b57343 100644 --- a/pkg/api/handlers/compat/images.go +++ b/pkg/api/handlers/compat/images.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/containers/buildah" + "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" "github.com/containers/podman/v2/libpod" image2 "github.com/containers/podman/v2/libpod/image" @@ -17,7 +18,6 @@ import ( "github.com/containers/podman/v2/pkg/api/handlers/utils" "github.com/containers/podman/v2/pkg/auth" "github.com/containers/podman/v2/pkg/domain/entities" - "github.com/containers/podman/v2/pkg/util" "github.com/docker/docker/api/types" "github.com/gorilla/schema" "github.com/pkg/errors" @@ -93,7 +93,7 @@ func PruneImages(w http.ResponseWriter, r *http.Request) { }) } - //FIXME/TODO to do this exactly correct, pruneimages needs to return idrs and space-reclaimed, then we are golden + // FIXME/TODO to do this exactly correct, pruneimages needs to return idrs and space-reclaimed, then we are golden ipr := types.ImagesPruneReport{ ImagesDeleted: idr, SpaceReclaimed: 1, // TODO we cannot supply this right now @@ -113,7 +113,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) { Changes string `schema:"changes"` Comment string `schema:"comment"` Container string `schema:"container"` - //fromSrc string # fromSrc is currently unused + // fromSrc string # fromSrc is currently unused Pause bool `schema:"pause"` Repo string `schema:"repo"` Tag string `schema:"tag"` @@ -205,7 +205,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file")) } } - iid, err := runtime.Import(r.Context(), source, "", query.Changes, "", false) + iid, err := runtime.Import(r.Context(), source, "", "", query.Changes, "", false) if err != nil { utils.Error(w, 
"Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball")) return @@ -224,7 +224,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) { Status string `json:"status"` Progress string `json:"progress"` ProgressDetail map[string]string `json:"progressDetail"` - Id string `json:"id"` //nolint + Id string `json:"id"` // nolint }{ Status: iid, ProgressDetail: map[string]string{}, @@ -257,9 +257,9 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) { fromImage = fmt.Sprintf("%s:%s", fromImage, query.Tag) } - authConf, authfile, err := auth.GetCredentials(r) + authConf, authfile, key, err := auth.GetCredentials(r) if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) + utils.Error(w, "Failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) @@ -268,6 +268,16 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) { if sys := runtime.SystemContext(); sys != nil { registryOpts.DockerCertPath = sys.DockerCertPath } + rtc, err := runtime.GetConfig() + if err != nil { + utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()")) + return + } + pullPolicy, err := config.ValidatePullPolicy(rtc.Engine.PullPolicy) + if err != nil { + utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()")) + return + } img, err := runtime.ImageRuntime().New(r.Context(), fromImage, "", // signature policy @@ -276,7 +286,7 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) { ®istryOpts, image2.SigningOptions{}, nil, // label - util.PullImageMissing, + pullPolicy, ) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err) @@ -289,7 +299,7 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) { Error string `json:"error"` Progress string `json:"progress"` ProgressDetail map[string]string `json:"progressDetail"` - Id string `json:"id"` //nolint + Id string `json:"id"` // nolint }{ Status: fmt.Sprintf("pulling image (%s) from %s", img.Tag, strings.Join(img.Names(), ", ")), ProgressDetail: map[string]string{}, diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go index 9601f5e18..d5ccf56fe 100644 --- a/pkg/api/handlers/compat/images_build.go +++ b/pkg/api/handlers/compat/images_build.go @@ -1,8 +1,7 @@ package compat import ( - "bytes" - "encoding/base64" + "context" "encoding/json" "fmt" "io" @@ -11,28 +10,21 @@ import ( "os" "path/filepath" "strconv" - "strings" "github.com/containers/buildah" "github.com/containers/buildah/imagebuildah" + "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod" - "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/channel" "github.com/containers/storage/pkg/archive" "github.com/gorilla/schema" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) func BuildImage(w http.ResponseWriter, r *http.Request) { - authConfigs := map[string]handlers.AuthConfig{} - if hdr, found := r.Header["X-Registry-Config"]; found && len(hdr) > 0 { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, 
strings.NewReader(hdr[0])) - if json.NewDecoder(authConfigsJSON).Decode(&authConfigs) != nil { - utils.BadRequest(w, "X-Registry-Config", hdr[0], json.NewDecoder(authConfigsJSON).Decode(&authConfigs)) - return - } - } - if hdr, found := r.Header["Content-Type"]; found && len(hdr) > 0 { contentType := hdr[0] switch contentType { @@ -47,86 +39,80 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } - anchorDir, err := extractTarFile(r) + contextDirectory, err := extractTarFile(r) if err != nil { utils.InternalServerError(w, err) return } - defer os.RemoveAll(anchorDir) + + defer func() { + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + return + } + } + } + err := os.RemoveAll(filepath.Dir(contextDirectory)) + if err != nil { + logrus.Warn(errors.Wrapf(err, "failed to remove build scratch directory %q", filepath.Dir(contextDirectory))) + } + }() query := struct { + BuildArgs string `schema:"buildargs"` + CacheFrom string `schema:"cachefrom"` + CpuPeriod uint64 `schema:"cpuperiod"` // nolint + CpuQuota int64 `schema:"cpuquota"` // nolint + CpuSetCpus string `schema:"cpusetcpus"` // nolint + CpuShares uint64 `schema:"cpushares"` // nolint Dockerfile string `schema:"dockerfile"` - Tag []string `schema:"t"` ExtraHosts string `schema:"extrahosts"` - Remote string `schema:"remote"` - Quiet bool `schema:"q"` + ForceRm bool `schema:"forcerm"` + HTTPProxy bool `schema:"httpproxy"` + Labels string `schema:"labels"` + MemSwap int64 `schema:"memswap"` + Memory int64 `schema:"memory"` + NetworkMode string `schema:"networkmode"` NoCache bool `schema:"nocache"` - CacheFrom string `schema:"cachefrom"` + Outputs string `schema:"outputs"` + Platform string `schema:"platform"` Pull bool `schema:"pull"` + Quiet bool `schema:"q"` + Registry string `schema:"registry"` + Remote string `schema:"remote"` Rm bool `schema:"rm"` - ForceRm bool `schema:"forcerm"` - Memory int64 `schema:"memory"` - MemSwap int64 `schema:"memswap"` - CpuShares uint64 `schema:"cpushares"` //nolint - CpuSetCpus string `schema:"cpusetcpus"` //nolint - CpuPeriod uint64 `schema:"cpuperiod"` //nolint - CpuQuota int64 `schema:"cpuquota"` //nolint - BuildArgs string `schema:"buildargs"` ShmSize int `schema:"shmsize"` Squash bool `schema:"squash"` - Labels string `schema:"labels"` - NetworkMode string `schema:"networkmode"` - Platform string `schema:"platform"` + Tag []string `schema:"t"` Target string `schema:"target"` - Outputs string `schema:"outputs"` - Registry string `schema:"registry"` }{ - Dockerfile: "Dockerfile", - Tag: []string{}, - ExtraHosts: "", - Remote: "", - Quiet: false, - NoCache: false, - CacheFrom: "", - Pull: false, - Rm: true, - ForceRm: false, - Memory: 0, - MemSwap: 0, - CpuShares: 0, - CpuSetCpus: "", - CpuPeriod: 0, - CpuQuota: 0, - BuildArgs: "", - ShmSize: 64 * 1024 * 1024, - Squash: false, - Labels: "", - NetworkMode: "", - Platform: "", - Target: "", - Outputs: "", - Registry: "docker.io", + Dockerfile: "Dockerfile", + Registry: "docker.io", + Rm: true, + ShmSize: 64 * 1024 * 1024, + Tag: []string{}, } + decoder := r.Context().Value("decoder").(*schema.Decoder) if err := decoder.Decode(&query, r.URL.Query()); err != nil { utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err) return } - var ( - output string - additionalNames []string - ) + var output string if len(query.Tag) > 0 { output = query.Tag[0] } + if _, found := r.URL.Query()["target"]; found { + output 
= query.Target + } + + var additionalNames []string if len(query.Tag) > 1 { additionalNames = query.Tag[1:] } - if _, found := r.URL.Query()["target"]; found { - output = query.Target - } var buildArgs = map[string]string{} if _, found := r.URL.Query()["buildargs"]; found { if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil { @@ -156,95 +142,149 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } } - // build events will be recorded here - var ( - buildEvents = []string{} - progress = bytes.Buffer{} - ) + creds, authfile, key, err := auth.GetCredentials(r) + if err != nil { + // Credential value(s) not returned as their value is not human readable + utils.BadRequest(w, key.String(), "n/a", err) + return + } + defer auth.RemoveAuthfile(authfile) + + // Channels all mux'ed in select{} below to follow API build protocol + stdout := channel.NewWriter(make(chan []byte, 1)) + defer stdout.Close() + + auxout := channel.NewWriter(make(chan []byte, 1)) + defer auxout.Close() + + stderr := channel.NewWriter(make(chan []byte, 1)) + defer stderr.Close() + + reporter := channel.NewWriter(make(chan []byte, 1)) + defer reporter.Close() buildOptions := imagebuildah.BuildOptions{ - ContextDirectory: filepath.Join(anchorDir, "build"), + ContextDirectory: contextDirectory, PullPolicy: pullPolicy, Registry: query.Registry, IgnoreUnrecognizedInstructions: true, Quiet: query.Quiet, Isolation: buildah.IsolationChroot, - Runtime: "", - RuntimeArgs: nil, - TransientMounts: nil, Compression: archive.Gzip, Args: buildArgs, Output: output, AdditionalTags: additionalNames, - Log: func(format string, args ...interface{}) { - buildEvents = append(buildEvents, fmt.Sprintf(format, args...)) + Out: stdout, + Err: auxout, + ReportWriter: reporter, + OutputFormat: buildah.Dockerv2ImageManifest, + SystemContext: &types.SystemContext{ + AuthFilePath: authfile, + DockerAuthConfig: creds, }, - In: nil, - Out: &progress, - Err: &progress, - SignaturePolicyPath: "", - ReportWriter: &progress, - OutputFormat: buildah.Dockerv2ImageManifest, - SystemContext: nil, - NamespaceOptions: nil, - ConfigureNetwork: 0, - CNIPluginPath: "", - CNIConfigDir: "", - IDMappingOptions: nil, - AddCapabilities: nil, - DropCapabilities: nil, CommonBuildOpts: &buildah.CommonBuildOptions{ - AddHost: nil, - CgroupParent: "", - CPUPeriod: query.CpuPeriod, - CPUQuota: query.CpuQuota, - CPUShares: query.CpuShares, - CPUSetCPUs: query.CpuSetCpus, - CPUSetMems: "", - HTTPProxy: false, - Memory: query.Memory, - DNSSearch: nil, - DNSServers: nil, - DNSOptions: nil, - MemorySwap: query.MemSwap, - LabelOpts: nil, - SeccompProfilePath: "", - ApparmorProfile: "", - ShmSize: strconv.Itoa(query.ShmSize), - Ulimit: nil, - Volumes: nil, + CPUPeriod: query.CpuPeriod, + CPUQuota: query.CpuQuota, + CPUShares: query.CpuShares, + CPUSetCPUs: query.CpuSetCpus, + HTTPProxy: query.HTTPProxy, + Memory: query.Memory, + MemorySwap: query.MemSwap, + ShmSize: strconv.Itoa(query.ShmSize), }, - DefaultMountsFilePath: "", - IIDFile: "", Squash: query.Squash, Labels: labels, - Annotations: nil, - OnBuild: nil, - Layers: false, NoCache: query.NoCache, RemoveIntermediateCtrs: query.Rm, ForceRmIntermediateCtrs: query.ForceRm, - BlobDirectory: "", Target: query.Target, - Devices: nil, } runtime := r.Context().Value("runtime").(*libpod.Runtime) - id, _, err := runtime.Build(r.Context(), buildOptions, query.Dockerfile) - if err != nil { - utils.InternalServerError(w, err) - return + runCtx, cancel := context.WithCancel(context.Background()) + var imageID 
string + go func() { + defer cancel() + imageID, _, err = runtime.Build(r.Context(), buildOptions, query.Dockerfile) + if err != nil { + stderr.Write([]byte(err.Error() + "\n")) + } + }() + + flush := func() { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } } - // Find image ID that was built... - utils.WriteResponse(w, http.StatusOK, - struct { - Stream string `json:"stream"` - }{ - Stream: progress.String() + "\n" + - strings.Join(buildEvents, "\n") + - fmt.Sprintf("\nSuccessfully built %s\n", id), - }) + // Send headers and prime client for stream to come + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + flush() + + var failed bool + + body := w.(io.Writer) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + t, _ := ioutil.TempFile("", "build_*_server") + defer t.Close() + body = io.MultiWriter(t, w) + } + } + } + + enc := json.NewEncoder(body) + enc.SetEscapeHTML(true) +loop: + for { + m := struct { + Stream string `json:"stream,omitempty"` + Error string `json:"error,omitempty"` + }{} + + select { + case e := <-stdout.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-auxout.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-reporter.Chan(): + m.Stream = string(e) + if err := enc.Encode(m); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-stderr.Chan(): + failed = true + m.Error = string(e) + if err := enc.Encode(m); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + case <-runCtx.Done(): + if !failed { + if utils.IsLibpodRequest(r) { + m.Stream = imageID + } else { + m.Stream = fmt.Sprintf("Successfully built %12.12s\n", imageID) + } + if err := enc.Encode(m); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + } + break loop + } + } } func extractTarFile(r *http.Request) (string, error) { @@ -253,10 +293,9 @@ func extractTarFile(r *http.Request) (string, error) { if err != nil { return "", err } - buildDir := filepath.Join(anchorDir, "build") path := filepath.Join(anchorDir, "tarBall") - tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + tarBall, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return "", err } @@ -265,14 +304,17 @@ func extractTarFile(r *http.Request) (string, error) { // Content-Length not used as too many existing API clients didn't honor it _, err = io.Copy(tarBall, r.Body) r.Body.Close() - if err != nil { return "", fmt.Errorf("failed Request: Unable to copy tar file from request body %s", r.RequestURI) } - _, _ = tarBall.Seek(0, 0) - if err := archive.Untar(tarBall, buildDir, &archive.TarOptions{}); err != nil { + buildDir := filepath.Join(anchorDir, "build") + err = os.Mkdir(buildDir, 0700) + if err != nil { return "", err } - return anchorDir, nil + + _, _ = tarBall.Seek(0, 0) + err = archive.Untar(tarBall, buildDir, nil) + return buildDir, err } diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go index e69a2212a..dd706a156 100644 --- a/pkg/api/handlers/compat/images_push.go +++ b/pkg/api/handlers/compat/images_push.go @@ -49,9 +49,9 @@ func PushImage(w http.ResponseWriter, r *http.Request) { return } - authConf, authfile, err 
:= auth.GetCredentials(r) + authConf, authfile, key, err := auth.GetCredentials(r) if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) + utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) diff --git a/pkg/api/handlers/compat/ping.go b/pkg/api/handlers/compat/ping.go index eb7eed5b6..06150bb63 100644 --- a/pkg/api/handlers/compat/ping.go +++ b/pkg/api/handlers/compat/ping.go @@ -5,7 +5,6 @@ import ( "net/http" "github.com/containers/buildah" - "github.com/containers/podman/v2/pkg/api/handlers/utils" ) // Ping returns headers to client about the service @@ -14,13 +13,12 @@ import ( // Clients will use the Header availability to test which backend engine is in use. // Note: Additionally handler supports GET and HEAD methods func Ping(w http.ResponseWriter, r *http.Request) { - w.Header().Set("API-Version", utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion].String()) + // Note API-Version and Libpod-API-Version are set in handler_api.go w.Header().Set("BuildKit-Version", "") w.Header().Set("Docker-Experimental", "true") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Pragma", "no-cache") - w.Header().Set("Libpod-API-Version", utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String()) w.Header().Set("Libpod-Buildha-Version", buildah.Version) w.WriteHeader(http.StatusOK) diff --git a/pkg/api/handlers/compat/version.go b/pkg/api/handlers/compat/version.go index e12c7cefa..92900b75d 100644 --- a/pkg/api/handlers/compat/version.go +++ b/pkg/api/handlers/compat/version.go @@ -30,6 +30,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "Failed to obtain system memory info")) return } + components := []docker.ComponentVersion{{ Name: "Podman Engine", Version: versionInfo.Version, @@ -46,6 +47,9 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { }, }} + apiVersion := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion] + minVersion := utils.APIVersion[utils.CompatTree][utils.MinimalAPIVersion] + utils.WriteResponse(w, http.StatusOK, entities.ComponentVersion{ Version: docker.Version{ Platform: struct { @@ -53,7 +57,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { }{ Name: fmt.Sprintf("%s/%s/%s-%s", goRuntime.GOOS, goRuntime.GOARCH, infoData.Host.Distribution.Distribution, infoData.Host.Distribution.Version), }, - APIVersion: components[0].Details["APIVersion"], + APIVersion: fmt.Sprintf("%d.%d", apiVersion.Major, apiVersion.Minor), Arch: components[0].Details["Arch"], BuildTime: components[0].Details["BuildTime"], Components: components, @@ -61,7 +65,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { GitCommit: components[0].Details["GitCommit"], GoVersion: components[0].Details["GoVersion"], KernelVersion: components[0].Details["KernelVersion"], - MinAPIVersion: components[0].Details["MinAPIVersion"], + MinAPIVersion: fmt.Sprintf("%d.%d", minVersion.Major, minVersion.Minor), Os: components[0].Details["Os"], Version: components[0].Version, }}) diff --git a/pkg/api/handlers/compat/volumes.go b/pkg/api/handlers/compat/volumes.go index 976c52acb..a45509fdb 100644 --- a/pkg/api/handlers/compat/volumes.go +++ b/pkg/api/handlers/compat/volumes.go @@ -93,6 +93,29 @@ 
func CreateVolume(w http.ResponseWriter, r *http.Request) { return } + // See if the volume exists already + existingVolume, err := runtime.GetVolume(input.Name) + if err != nil && errors.Cause(err) != define.ErrNoSuchVolume { + utils.InternalServerError(w, err) + return + } + + // if using the compat layer and the volume already exists, we + // must return a 201 with the same information as create + if existingVolume != nil && !utils.IsLibpodRequest(r) { + response := docker_api_types.Volume{ + CreatedAt: existingVolume.CreatedTime().Format(time.RFC3339), + Driver: existingVolume.Driver(), + Labels: existingVolume.Labels(), + Mountpoint: existingVolume.MountPoint(), + Name: existingVolume.Name(), + Options: existingVolume.Options(), + Scope: existingVolume.Scope(), + } + utils.WriteResponse(w, http.StatusCreated, response) + return + } + if len(input.Name) > 0 { volumeOptions = append(volumeOptions, libpod.WithVolumeName(input.Name)) } diff --git a/pkg/api/handlers/libpod/containers_stats.go b/pkg/api/handlers/libpod/containers_stats.go new file mode 100644 index 000000000..4d5abe118 --- /dev/null +++ b/pkg/api/handlers/libpod/containers_stats.go @@ -0,0 +1,72 @@ +package libpod + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" + "github.com/gorilla/schema" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const DefaultStatsPeriod = 5 * time.Second + +func StatsContainer(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + + query := struct { + Containers []string `schema:"containers"` + Stream bool `schema:"stream"` + }{ + Stream: true, + } + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) + return + } + + // Reduce code duplication and use the local/abi implementation of + // container stats. + containerEngine := abi.ContainerEngine{Libpod: runtime} + + statsOptions := entities.ContainerStatsOptions{ + Stream: query.Stream, + } + + // Stats will stop if the connection is closed. + statsChan, err := containerEngine.ContainerStats(r.Context(), query.Containers, statsOptions) + if err != nil { + utils.InternalServerError(w, err) + return + } + + // Write header and content type. + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + // Setup JSON encoder for streaming. + coder := json.NewEncoder(w) + coder.SetEscapeHTML(true) + + for stats := range statsChan { + if err := coder.Encode(stats); err != nil { + // Note: even when streaming, the stats goroutine will + // be notified (and stop) as the connection will be + // closed. 
+ logrus.Errorf("Unable to encode stats: %v", err) + return + } + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } +} diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go index 85f7903dc..3054922c2 100644 --- a/pkg/api/handlers/libpod/images.go +++ b/pkg/api/handlers/libpod/images.go @@ -11,8 +11,6 @@ import ( "strings" "github.com/containers/buildah" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod" @@ -25,7 +23,6 @@ import ( "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/containers/podman/v2/pkg/errorhandling" - "github.com/containers/podman/v2/pkg/util" utils2 "github.com/containers/podman/v2/utils" "github.com/gorilla/schema" "github.com/pkg/errors" @@ -391,7 +388,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) { tmpfile.Close() source = tmpfile.Name() } - importedImage, err := runtime.Import(context.Background(), source, query.Reference, query.Changes, query.Message, true) + importedImage, err := runtime.Import(context.Background(), source, query.Reference, "", query.Changes, query.Message, true) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to import image")) return @@ -400,123 +397,6 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) { utils.WriteResponse(w, http.StatusOK, entities.ImageImportReport{Id: importedImage}) } -// ImagesPull is the v2 libpod endpoint for pulling images. Note that the -// mandatory `reference` must be a reference to a registry (i.e., of docker -// transport or be normalized to one). Other transports are rejected as they -// do not make sense in a remote context. -func ImagesPull(w http.ResponseWriter, r *http.Request) { - runtime := r.Context().Value("runtime").(*libpod.Runtime) - decoder := r.Context().Value("decoder").(*schema.Decoder) - query := struct { - Reference string `schema:"reference"` - OverrideOS string `schema:"overrideOS"` - OverrideArch string `schema:"overrideArch"` - OverrideVariant string `schema:"overrideVariant"` - TLSVerify bool `schema:"tlsVerify"` - AllTags bool `schema:"allTags"` - }{ - TLSVerify: true, - } - - if err := decoder.Decode(&query, r.URL.Query()); err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) - return - } - - if len(query.Reference) == 0 { - utils.InternalServerError(w, errors.New("reference parameter cannot be empty")) - return - } - - imageRef, err := utils.ParseDockerReference(query.Reference) - if err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "image destination %q is not a docker-transport reference", query.Reference)) - return - } - - // Trim the docker-transport prefix. 
- rawImage := strings.TrimPrefix(query.Reference, fmt.Sprintf("%s://", docker.Transport.Name())) - - // all-tags doesn't work with a tagged reference, so let's check early - namedRef, err := reference.Parse(rawImage) - if err != nil { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Wrapf(err, "error parsing reference %q", rawImage)) - return - } - if _, isTagged := namedRef.(reference.Tagged); isTagged && query.AllTags { - utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, - errors.Errorf("reference %q must not have a tag for all-tags", rawImage)) - return - } - - authConf, authfile, err := auth.GetCredentials(r) - if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) - return - } - defer auth.RemoveAuthfile(authfile) - - // Setup the registry options - dockerRegistryOptions := image.DockerRegistryOptions{ - DockerRegistryCreds: authConf, - OSChoice: query.OverrideOS, - ArchitectureChoice: query.OverrideArch, - VariantChoice: query.OverrideVariant, - } - if _, found := r.URL.Query()["tlsVerify"]; found { - dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) - } - - sys := runtime.SystemContext() - if sys == nil { - sys = image.GetSystemContext("", authfile, false) - } - dockerRegistryOptions.DockerCertPath = sys.DockerCertPath - sys.DockerAuthConfig = authConf - - // Prepare the images we want to pull - imagesToPull := []string{} - res := []handlers.LibpodImagesPullReport{} - imageName := namedRef.String() - - if !query.AllTags { - imagesToPull = append(imagesToPull, imageName) - } else { - tags, err := docker.GetRepositoryTags(context.Background(), sys, imageRef) - if err != nil { - utils.InternalServerError(w, errors.Wrap(err, "error getting repository tags")) - return - } - for _, tag := range tags { - imagesToPull = append(imagesToPull, fmt.Sprintf("%s:%s", imageName, tag)) - } - } - - // Finally pull the images - for _, img := range imagesToPull { - newImage, err := runtime.ImageRuntime().New( - context.Background(), - img, - "", - authfile, - os.Stderr, - &dockerRegistryOptions, - image.SigningOptions{}, - nil, - util.PullImageAlways) - if err != nil { - utils.InternalServerError(w, err) - return - } - res = append(res, handlers.LibpodImagesPullReport{ID: newImage.ID()}) - } - - utils.WriteResponse(w, http.StatusOK, res) -} - // PushImage is the handler for the compat http endpoint for pushing images. 
func PushImage(w http.ResponseWriter, r *http.Request) { decoder := r.Context().Value("decoder").(*schema.Decoder) @@ -558,9 +438,9 @@ func PushImage(w http.ResponseWriter, r *http.Request) { return } - authConf, authfile, err := auth.GetCredentials(r) + authConf, authfile, key, err := auth.GetCredentials(r) if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) + utils.Error(w, "Failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) @@ -680,24 +560,41 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) { func UntagImage(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) - name := utils.GetName(r) - newImage, err := runtime.ImageRuntime().NewFromLocal(name) - if err != nil { - utils.ImageNotFound(w, name, errors.Wrapf(err, "Failed to find image %s", name)) - return - } - tag := "latest" - if len(r.Form.Get("tag")) > 0 { - tag = r.Form.Get("tag") - } - if len(r.Form.Get("repo")) < 1 { + tags := []string{} // Note: if empty, all tags will be removed from the image. + repo := r.Form.Get("repo") + tag := r.Form.Get("tag") + + // Do the parameter dance. + switch { + // If tag is set, repo must be as well. + case len(repo) == 0 && len(tag) > 0: utils.Error(w, "repo tag is required", http.StatusBadRequest, errors.New("repo parameter is required to tag an image")) return + + case len(repo) == 0: + break + + // If repo is specified, we need to add that to the tags. + default: + if len(tag) == 0 { + // Normalize tag to "latest" if empty. + tag = "latest" + } + tags = append(tags, fmt.Sprintf("%s:%s", repo, tag)) } - repo := r.Form.Get("repo") - tagName := fmt.Sprintf("%s:%s", repo, tag) - if err := newImage.UntagImage(tagName); err != nil { - utils.Error(w, "failed to untag", http.StatusInternalServerError, err) + + // Now use the ABI implementation to prevent us from having duplicate + // code. + opts := entities.ImageUntagOptions{} + imageEngine := abi.ImageEngine{Libpod: runtime} + + name := utils.GetName(r) + if err := imageEngine.Untag(r.Context(), name, tags, opts); err != nil { + if errors.Cause(err) == define.ErrNoSuchImage { + utils.ImageNotFound(w, name, errors.Wrapf(err, "Failed to find image %s", name)) + } else { + utils.Error(w, "failed to untag", http.StatusInternalServerError, err) + } return } utils.WriteResponse(w, http.StatusCreated, "") diff --git a/pkg/api/handlers/libpod/images_pull.go b/pkg/api/handlers/libpod/images_pull.go new file mode 100644 index 000000000..791ef7a48 --- /dev/null +++ b/pkg/api/handlers/libpod/images_pull.go @@ -0,0 +1,202 @@ +package libpod + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/libpod/image" + "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/channel" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/util" + "github.com/gorilla/schema" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ImagesPull is the v2 libpod endpoint for pulling images. 
Note that the +// mandatory `reference` must be a reference to a registry (i.e., of docker +// transport or be normalized to one). Other transports are rejected as they +// do not make sense in a remote context. +func ImagesPull(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Reference string `schema:"reference"` + OverrideOS string `schema:"overrideOS"` + OverrideArch string `schema:"overrideArch"` + OverrideVariant string `schema:"overrideVariant"` + TLSVerify bool `schema:"tlsVerify"` + AllTags bool `schema:"allTags"` + }{ + TLSVerify: true, + } + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + + if len(query.Reference) == 0 { + utils.InternalServerError(w, errors.New("reference parameter cannot be empty")) + return + } + + imageRef, err := utils.ParseDockerReference(query.Reference) + if err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "image destination %q is not a docker-transport reference", query.Reference)) + return + } + + // Trim the docker-transport prefix. + rawImage := strings.TrimPrefix(query.Reference, fmt.Sprintf("%s://", docker.Transport.Name())) + + // all-tags doesn't work with a tagged reference, so let's check early + namedRef, err := reference.Parse(rawImage) + if err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "error parsing reference %q", rawImage)) + return + } + if _, isTagged := namedRef.(reference.Tagged); isTagged && query.AllTags { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Errorf("reference %q must not have a tag for all-tags", rawImage)) + return + } + + authConf, authfile, key, err := auth.GetCredentials(r) + if err != nil { + utils.Error(w, "Failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", key, r.URL.String())) + return + } + defer auth.RemoveAuthfile(authfile) + + // Setup the registry options + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerRegistryCreds: authConf, + OSChoice: query.OverrideOS, + ArchitectureChoice: query.OverrideArch, + VariantChoice: query.OverrideVariant, + } + if _, found := r.URL.Query()["tlsVerify"]; found { + dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) + } + + sys := runtime.SystemContext() + if sys == nil { + sys = image.GetSystemContext("", authfile, false) + } + dockerRegistryOptions.DockerCertPath = sys.DockerCertPath + sys.DockerAuthConfig = authConf + + // Prepare the images we want to pull + imagesToPull := []string{} + imageName := namedRef.String() + + if !query.AllTags { + imagesToPull = append(imagesToPull, imageName) + } else { + tags, err := docker.GetRepositoryTags(context.Background(), sys, imageRef) + if err != nil { + utils.InternalServerError(w, errors.Wrap(err, "error getting repository tags")) + return + } + for _, tag := range tags { + imagesToPull = append(imagesToPull, fmt.Sprintf("%s:%s", imageName, tag)) + } + } + + writer := channel.NewWriter(make(chan []byte, 1)) + defer writer.Close() + + stderr := channel.NewWriter(make(chan []byte, 1)) + defer stderr.Close() + + images := 
make([]string, 0, len(imagesToPull)) + runCtx, cancel := context.WithCancel(context.Background()) + go func(imgs []string) { + defer cancel() + // Finally pull the images + for _, img := range imgs { + newImage, err := runtime.ImageRuntime().New( + runCtx, + img, + "", + authfile, + writer, + &dockerRegistryOptions, + image.SigningOptions{}, + nil, + util.PullImageAlways) + if err != nil { + stderr.Write([]byte(err.Error() + "\n")) + } else { + images = append(images, newImage.ID()) + } + } + }(imagesToPull) + + flush := func() { + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + flush() + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(true) + var failed bool +loop: // break out of for/select infinite loop + for { + var report entities.ImagePullReport + select { + case e := <-writer.Chan(): + report.Stream = string(e) + if err := enc.Encode(report); err != nil { + stderr.Write([]byte(err.Error())) + } + flush() + case e := <-stderr.Chan(): + failed = true + report.Error = string(e) + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + flush() + case <-runCtx.Done(): + if !failed { + // Send all image id's pulled in 'images' stanza + report.Images = images + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + + report.Images = nil + // Pull last ID from list and publish in 'id' stanza. This maintains previous API contract + report.ID = images[len(images)-1] + if err := enc.Encode(report); err != nil { + logrus.Warnf("Failed to json encode error %q", err.Error()) + } + + flush() + } + break loop // break out of for/select infinite loop + case <-r.Context().Done(): + // Client has closed connection + break loop // break out of for/select infinite loop + } + } +} diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go index 8e65248e2..2031dd42f 100644 --- a/pkg/api/handlers/libpod/manifests.go +++ b/pkg/api/handlers/libpod/manifests.go @@ -6,11 +6,13 @@ import ( "github.com/containers/buildah/manifests" copy2 "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/domain/infra/abi" "github.com/gorilla/schema" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -48,17 +50,18 @@ func ManifestCreate(w http.ResponseWriter, r *http.Request) { func ManifestInspect(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) name := utils.GetName(r) - newImage, err := runtime.ImageRuntime().NewFromLocal(name) - if err != nil { - utils.ImageNotFound(w, name, err) + imageEngine := abi.ImageEngine{Libpod: runtime} + inspectReport, inspectError := imageEngine.ManifestInspect(r.Context(), name) + if inspectError != nil { + utils.Error(w, "Something went wrong.", http.StatusNotFound, inspectError) return } - data, err := newImage.InspectManifest() - if err != nil { - utils.InternalServerError(w, err) + var list manifest.Schema2List + if err := json.Unmarshal(inspectReport, &list); err != nil { + utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, 
"Unmarshal()")) return } - utils.WriteResponse(w, http.StatusOK, data) + utils.WriteResponse(w, http.StatusOK, &list) } func ManifestAdd(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go index dfece2a4e..b3c4840b8 100644 --- a/pkg/api/handlers/libpod/networks.go +++ b/pkg/api/handlers/libpod/networks.go @@ -92,8 +92,8 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) { } if reports[0].Err != nil { // If the network cannot be found, we return a 404. - if errors.Cause(err) == define.ErrNoSuchNetwork { - utils.Error(w, "Something went wrong", http.StatusNotFound, err) + if errors.Cause(reports[0].Err) == define.ErrNoSuchNetwork { + utils.Error(w, "Something went wrong", http.StatusNotFound, reports[0].Err) return } } diff --git a/pkg/api/handlers/libpod/play.go b/pkg/api/handlers/libpod/play.go index 59f78da8c..2296e170a 100644 --- a/pkg/api/handlers/libpod/play.go +++ b/pkg/api/handlers/libpod/play.go @@ -48,9 +48,9 @@ func PlayKube(w http.ResponseWriter, r *http.Request) { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "error closing temporary file")) return } - authConf, authfile, err := auth.GetCredentials(r) + authConf, authfile, key, err := auth.GetCredentials(r) if err != nil { - utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String())) + utils.Error(w, "Failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", key, r.URL.String())) return } defer auth.RemoveAuthfile(authfile) diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index 0ccaa95bb..9e503dbb0 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -33,7 +33,7 @@ type LibpodImagesLoadReport struct { } type LibpodImagesPullReport struct { - ID string `json:"id"` + entities.ImagePullReport } // LibpodImagesRemoveReport is the return type for image removal via the rest diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go index 62fdc05dd..517dccad0 100644 --- a/pkg/api/handlers/utils/handler.go +++ b/pkg/api/handlers/utils/handler.go @@ -43,8 +43,8 @@ var ( // clients to shop for the Version they wish to support APIVersion = map[VersionTree]map[VersionLevel]semver.Version{ LibpodTree: { - CurrentAPIVersion: semver.MustParse("1.0.0"), - MinimalAPIVersion: semver.MustParse("1.0.0"), + CurrentAPIVersion: semver.MustParse("2.0.0"), + MinimalAPIVersion: semver.MustParse("2.0.0"), }, CompatTree: { CurrentAPIVersion: semver.MustParse("1.40.0"), diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go index e47b66bb4..28f5a0b42 100644 --- a/pkg/api/server/handler_api.go +++ b/pkg/api/server/handler_api.go @@ -7,7 +7,9 @@ import ( "runtime" "github.com/containers/podman/v2/pkg/api/handlers/utils" - log "github.com/sirupsen/logrus" + "github.com/containers/podman/v2/pkg/auth" + "github.com/google/uuid" + "github.com/sirupsen/logrus" ) // APIHandler is a wrapper to enhance HandlerFunc's and remove redundant code @@ -19,7 +21,7 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc { if err != nil { buf := make([]byte, 1<<20) n := runtime.Stack(buf, true) - log.Warnf("Recovering from API handler panic: %v, %s", err, buf[:n]) + logrus.Warnf("Recovering from API handler panic: %v, %s", err, buf[:n]) // Try to inform client things went south... 
won't work if handler already started writing response body utils.InternalServerError(w, fmt.Errorf("%v", err)) } @@ -27,19 +29,39 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc { // Wrapper to hide some boiler plate fn := func(w http.ResponseWriter, r *http.Request) { - log.Debugf("APIHandler -- Method: %s URL: %s", r.Method, r.URL.String()) + rid := uuid.New().String() + if logrus.IsLevelEnabled(logrus.DebugLevel) { + logrus.Debugf("APIHandler(%s) -- Method: %s URL: %s", rid, r.Method, r.URL.String()) + for k, v := range r.Header { + switch auth.HeaderAuthName(k) { + case auth.XRegistryConfigHeader, auth.XRegistryAuthHeader: + logrus.Debugf("APIHandler(%s) -- Header: %s: <hidden>", rid, k) + default: + logrus.Debugf("APIHandler(%s) -- Header: %s: %v", rid, k, v) + } + } + } + // Set in case handler wishes to correlate logging events + r.Header.Set("X-Reference-Id", rid) if err := r.ParseForm(); err != nil { - log.Infof("Failed Request: unable to parse form: %q", err) + logrus.Infof("Failed Request: unable to parse form: %q (%s)", err, rid) } // TODO: Use r.ConnContext when ported to go 1.13 - c := context.WithValue(r.Context(), "decoder", s.Decoder) //nolint - c = context.WithValue(c, "runtime", s.Runtime) //nolint - c = context.WithValue(c, "shutdownFunc", s.Shutdown) //nolint - c = context.WithValue(c, "idletracker", s.idleTracker) //nolint + c := context.WithValue(r.Context(), "decoder", s.Decoder) // nolint + c = context.WithValue(c, "runtime", s.Runtime) // nolint + c = context.WithValue(c, "shutdownFunc", s.Shutdown) // nolint + c = context.WithValue(c, "idletracker", s.idleTracker) // nolint r = r.WithContext(c) + cv := utils.APIVersion[utils.CompatTree][utils.CurrentAPIVersion] + w.Header().Set("API-Version", fmt.Sprintf("%d.%d", cv.Major, cv.Minor)) + + lv := utils.APIVersion[utils.LibpodTree][utils.CurrentAPIVersion].String() + w.Header().Set("Libpod-API-Version", lv) + w.Header().Set("Server", "Libpod/"+lv+" ("+runtime.GOOS+")") + h(w, r) } fn(w, r) diff --git a/pkg/api/server/idle/tracker.go b/pkg/api/server/idle/tracker.go new file mode 100644 index 000000000..1b378c492 --- /dev/null +++ b/pkg/api/server/idle/tracker.go @@ -0,0 +1,96 @@ +package idle + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/sirupsen/logrus" +) + +// Tracker holds the state for the server's idle tracking +type Tracker struct { + // Duration is the API idle window + Duration time.Duration + hijacked int // count of active connections managed by handlers + managed map[net.Conn]struct{} // set of active connections managed by http package + mux sync.Mutex // protect managed map + timer *time.Timer + total int // total number of connections made to this server instance +} + +// NewTracker creates and initializes a new Tracker object +// For best behavior, duration should be 2x http idle connection timeout +func NewTracker(idle time.Duration) *Tracker { + return &Tracker{ + managed: make(map[net.Conn]struct{}), + Duration: idle, + timer: time.NewTimer(idle), + } +} + +// ConnState is called on HTTP connection state changes. +// - Once StateHijacked, StateClose is _NOT_ called on that connection +// - There are two "idle" timeouts, the http idle connection (not to be confused with the TCP/IP idle socket timeout) +// and the API idle window. The caller should set the http idle timeout to 2x the time provided to NewTacker() which +// is the API idle window. 
+func (t *Tracker) ConnState(conn net.Conn, state http.ConnState) { + t.mux.Lock() + defer t.mux.Unlock() + + logrus.Debugf("IdleTracker %p:%v %dm+%dh/%dt connection(s)", conn, state, len(t.managed), t.hijacked, t.TotalConnections()) + switch state { + case http.StateNew, http.StateActive: + // stop the API timer when the server transitions any connection to an "active" state + t.managed[conn] = struct{}{} + t.timer.Stop() + t.total++ + case http.StateHijacked: + // hijacked connections should call Close() when finished. + // Note: If a handler hijack's a connection and then doesn't Close() it, + // the API timer will not fire and the server will _NOT_ timeout. + delete(t.managed, conn) + t.hijacked++ + case http.StateIdle: + // When any connection goes into the http idle state, we know: + // - we have an active connection + // - the API timer should not be counting down (See case StateNew/StateActive) + break + case http.StateClosed: + oldActive := t.ActiveConnections() + + // Either the server or a hijacking handler has closed the http connection to a client + if _, found := t.managed[conn]; found { + delete(t.managed, conn) + } else { + t.hijacked-- // guarded by t.mux above + } + + // Transitioned from any "active" connection to no connections + if oldActive > 0 && t.ActiveConnections() == 0 { + t.timer.Stop() // See library source for Reset() issues and why they are not fixed + t.timer.Reset(t.Duration) // Restart the API window timer + } + } +} + +// Close is used to update Tracker that a StateHijacked connection has been closed by handler (StateClosed) +func (t *Tracker) Close() { + t.ConnState(nil, http.StateClosed) +} + +// ActiveConnections returns the number of current managed or StateHijacked connections +func (t *Tracker) ActiveConnections() int { + return len(t.managed) + t.hijacked +} + +// TotalConnections returns total number of connections made to this instance of the service +func (t *Tracker) TotalConnections() int { + return t.total +} + +// Done is called when idle timer has expired +func (t *Tracker) Done() <-chan time.Time { + return t.timer.C +} diff --git a/pkg/api/server/idletracker/idletracker.go b/pkg/api/server/idletracker/idletracker.go deleted file mode 100644 index 1ee905a99..000000000 --- a/pkg/api/server/idletracker/idletracker.go +++ /dev/null @@ -1,74 +0,0 @@ -package idletracker - -import ( - "net" - "net/http" - "sync" - "time" - - "github.com/sirupsen/logrus" -) - -type IdleTracker struct { - http map[net.Conn]struct{} - hijacked int - total int - mux sync.Mutex - timer *time.Timer - Duration time.Duration -} - -func NewIdleTracker(idle time.Duration) *IdleTracker { - return &IdleTracker{ - http: make(map[net.Conn]struct{}), - Duration: idle, - timer: time.NewTimer(idle), - } -} - -func (t *IdleTracker) ConnState(conn net.Conn, state http.ConnState) { - t.mux.Lock() - defer t.mux.Unlock() - - oldActive := t.ActiveConnections() - logrus.Debugf("IdleTracker %p:%v %d/%d connection(s)", conn, state, oldActive, t.TotalConnections()) - switch state { - case http.StateNew, http.StateActive: - t.http[conn] = struct{}{} - // stop the timer if we transitioned from idle - if oldActive == 0 { - t.timer.Stop() - } - t.total++ - case http.StateHijacked: - // hijacked connections are handled elsewhere - delete(t.http, conn) - t.hijacked++ - case http.StateIdle, http.StateClosed: - delete(t.http, conn) - // Restart the timer if we've become idle - if oldActive > 0 && len(t.http) == 0 { - t.timer.Stop() - t.timer.Reset(t.Duration) - } - } -} - -func (t *IdleTracker) 
TrackHijackedClosed() { - t.mux.Lock() - defer t.mux.Unlock() - - t.hijacked-- -} - -func (t *IdleTracker) ActiveConnections() int { - return len(t.http) + t.hijacked -} - -func (t *IdleTracker) TotalConnections() int { - return t.total -} - -func (t *IdleTracker) Done() <-chan time.Time { - return t.timer.C -} diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go index 5931c2fc9..b2d2543c4 100644 --- a/pkg/api/server/register_archive.go +++ b/pkg/api/server/register_archive.go @@ -9,7 +9,7 @@ import ( ) func (s *APIServer) registerAchiveHandlers(r *mux.Router) error { - // swagger:operation POST /containers/{name}/archive compat putArchive + // swagger:operation PUT /containers/{name}/archive compat putArchive // --- // summary: Put files into a container // description: Put a tar archive of files into a container @@ -84,9 +84,9 @@ func (s *APIServer) registerAchiveHandlers(r *mux.Router) error { // $ref: "#/responses/NoSuchContainer" // 500: // $ref: "#/responses/InternalError" - r.HandleFunc(VersionedPath("/containers/{name}/archive"), s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost) + r.HandleFunc(VersionedPath("/containers/{name}/archive"), s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPut, http.MethodHead) // Added non version path to URI to support docker non versioned paths - r.HandleFunc("/containers/{name}/archive", s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost) + r.HandleFunc("/containers/{name}/archive", s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPut, http.MethodHead) /* Libpod diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go index 0ad5d29ea..870c6a90c 100644 --- a/pkg/api/server/register_containers.go +++ b/pkg/api/server/register_containers.go @@ -1013,7 +1013,7 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error { // tags: // - containers // summary: Get stats for a container - // description: This returns a live stream of a container’s resource usage statistics. + // description: DEPRECATED. This endpoint will be removed with the next major release. Please use /libpod/containers/stats instead. // parameters: // - in: path // name: name @@ -1035,6 +1035,35 @@ // 500: // $ref: "#/responses/InternalError" r.HandleFunc(VersionedPath("/libpod/containers/{name}/stats"), s.APIHandler(compat.StatsContainer)).Methods(http.MethodGet) + // swagger:operation GET /libpod/containers/stats libpod libpodStatsContainers + // --- + // tags: + // - containers + // summary: Get stats for one or more containers + // description: Return a live stream of resource usage statistics of one or more containers. If no container is specified, the statistics of all containers are returned.
+ // parameters: + // - in: query + // name: containers + // description: names or IDs of containers + // type: array + // items: + // type: string + // - in: query + // name: stream + // type: boolean + // default: true + // description: Stream the output + // produces: + // - application/json + // responses: + // 200: + // description: no error + // 404: + // $ref: "#/responses/NoSuchContainer" + // 500: + // $ref: "#/responses/InternalError" + r.HandleFunc(VersionedPath("/libpod/containers/stats"), s.APIHandler(libpod.StatsContainer)).Methods(http.MethodGet) + // swagger:operation GET /libpod/containers/{name}/top libpod libpodTopContainer // --- // tags: diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index b1007fe09..cb0d26d1e 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -1175,7 +1175,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // tags: // - images // summary: Untag an image - // description: Untag an image + // description: Untag an image. If no repo and tag are specified, all tags are removed from the image. // parameters: // - in: path // name: name:.* @@ -1423,6 +1423,13 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // description: | // output configuration TBD // (As of version 1.xx) + // - in: query + // name: httpproxy + // type: boolean + // default: + // description: | + // Inject http proxy environment variables into container + // (As of version 2.0.0) // produces: // - application/json // responses: diff --git a/pkg/api/server/register_volumes.go b/pkg/api/server/register_volumes.go index 22488b158..aa0f67604 100644 --- a/pkg/api/server/register_volumes.go +++ b/pkg/api/server/register_volumes.go @@ -154,7 +154,9 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error { // parameters: // - in: body // name: create - // description: attributes for creating a container + // description: | + // attributes for creating a volume. + // Note: If a volume by the same name exists, a 201 response with that volume's information will be generated.
// schema: // $ref: "#/definitions/DockerVolumeCreate" // produces: diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go index e7c031234..09a9f6370 100644 --- a/pkg/api/server/server.go +++ b/pkg/api/server/server.go @@ -16,7 +16,7 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/pkg/api/handlers" - "github.com/containers/podman/v2/pkg/api/server/idletracker" + "github.com/containers/podman/v2/pkg/api/server/idle" "github.com/coreos/go-systemd/v22/activation" "github.com/coreos/go-systemd/v22/daemon" "github.com/gorilla/mux" @@ -26,14 +26,14 @@ import ( ) type APIServer struct { - http.Server // The HTTP work happens here - *schema.Decoder // Decoder for Query parameters to structs - context.Context // Context to carry objects to handlers - *libpod.Runtime // Where the real work happens - net.Listener // mux for routing HTTP API calls to libpod routines - context.CancelFunc // Stop APIServer - idleTracker *idletracker.IdleTracker // Track connections to support idle shutdown - pprof *http.Server // Sidecar http server for providing performance data + http.Server // The HTTP work happens here + *schema.Decoder // Decoder for Query parameters to structs + context.Context // Context to carry objects to handlers + *libpod.Runtime // Where the real work happens + net.Listener // mux for routing HTTP API calls to libpod routines + context.CancelFunc // Stop APIServer + idleTracker *idle.Tracker // Track connections to support idle shutdown + pprof *http.Server // Sidecar http server for providing performance data } // Number of seconds to wait for next request, if exceeded shutdown server @@ -70,13 +70,13 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li } router := mux.NewRouter().UseEncodedPath() - idle := idletracker.NewIdleTracker(duration) + idle := idle.NewTracker(duration) server := APIServer{ Server: http.Server{ Handler: router, ReadHeaderTimeout: 20 * time.Second, - IdleTimeout: duration, + IdleTimeout: duration * 2, ConnState: idle.ConnState, ErrorLog: log.New(logrus.StandardLogger().Out, "", 0), }, diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 69a7da869..fcbf6fe39 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "encoding/base64" "encoding/json" + "fmt" "io/ioutil" "net/http" "os" @@ -15,21 +16,98 @@ import ( "github.com/sirupsen/logrus" ) -// XRegistryAuthHeader is the key to the encoded registry authentication -// configuration in an http-request header. -const XRegistryAuthHeader = "X-Registry-Auth" +type HeaderAuthName string -// GetCredentials extracts one or more DockerAuthConfigs from the request's +func (h HeaderAuthName) String() string { return string(h) } + +// XRegistryAuthHeader is the key to the encoded registry authentication configuration in an http-request header. +// This header supports one registry per header occurrence. To support N registries provided N headers, one per registry. +// As of Docker API 1.40 and Libpod API 1.0.0, this header is supported by all endpoints. +const XRegistryAuthHeader HeaderAuthName = "X-Registry-Auth" + +// XRegistryConfigHeader is the key to the encoded registry authentication configuration in an http-request header. +// This header supports N registries in one header via a Base64 encoded, JSON map. +// As of Docker API 1.40 and Libpod API 2.0.0, this header is supported by build endpoints. 
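As a hedged client-side sketch (the registry names and credentials below are placeholders), the X-Registry-Config header that getConfigCredentials decodes further down is a base64 URL-encoded JSON map of registry names to AuthConfig entries, with the empty key acting as the default:

package client

import (
	"encoding/base64"
	"encoding/json"
	"net/http"

	dockerAPITypes "github.com/docker/docker/api/types"
)

// addRegistryConfig attaches an X-Registry-Config header to an outgoing API request.
func addRegistryConfig(req *http.Request) error {
	configs := map[string]dockerAPITypes.AuthConfig{
		"quay.io": {Username: "user", Password: "secret"},
		"":        {Username: "fallback", Password: "secret"}, // empty key is treated as the default
	}
	buf, err := json.Marshal(configs)
	if err != nil {
		return err
	}
	req.Header.Set("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	return nil
}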
+const XRegistryConfigHeader HeaderAuthName = "X-Registry-Config" + +// GetCredentials queries the http.Request for X-Registry-.* headers and extracts +// the necessary authentication information for libpod operations +func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, HeaderAuthName, error) { + has := func(key HeaderAuthName) bool { hdr, found := r.Header[string(key)]; return found && len(hdr) > 0 } + switch { + case has(XRegistryConfigHeader): + c, f, err := getConfigCredentials(r) + return c, f, XRegistryConfigHeader, err + case has(XRegistryAuthHeader): + c, f, err := getAuthCredentials(r) + return c, f, XRegistryAuthHeader, err + + } + return nil, "", "", nil +} + +// getConfigCredentials extracts one or more docker.AuthConfig from the request's +// header. An empty key will be used as default while a named registry will be +// returned as types.DockerAuthConfig +func getConfigCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { + var auth *types.DockerAuthConfig + configs := make(map[string]types.DockerAuthConfig) + + for _, h := range r.Header[string(XRegistryConfigHeader)] { + param, err := base64.URLEncoding.DecodeString(h) + if err != nil { + return nil, "", errors.Wrapf(err, "failed to decode %q", XRegistryConfigHeader) + } + + ac := make(map[string]dockerAPITypes.AuthConfig) + err = json.Unmarshal(param, &ac) + if err != nil { + return nil, "", errors.Wrapf(err, "failed to unmarshal %q", XRegistryConfigHeader) + } + + for k, v := range ac { + configs[k] = dockerAuthToImageAuth(v) + } + } + + // Empty key implies no registry given in API + if c, found := configs[""]; found { + auth = &c + } + + // Override any default given above if specialized credentials provided + if registries, found := r.URL.Query()["registry"]; found { + for _, r := range registries { + for k, v := range configs { + if strings.Contains(k, r) { + v := v + auth = &v + break + } + } + if auth != nil { + break + } + } + + if auth == nil { + logrus.Debugf("%q header found in request, but \"registry=%v\" query parameter not provided", + XRegistryConfigHeader, registries) + } else { + logrus.Debugf("%q header found in request for username %q", XRegistryConfigHeader, auth.Username) + } + } + + authfile, err := authConfigsToAuthFile(configs) + return auth, authfile, err +} + +// getAuthCredentials extracts one or more DockerAuthConfigs from the request's // header. The header could specify a single-auth config in which case the // first return value is set. In case of a multi-auth header, the contents are // stored in a temporary auth file (2nd return value). Note that the auth file // should be removed after usage. -func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { - authHeader := r.Header.Get(XRegistryAuthHeader) - if len(authHeader) == 0 { - return nil, "", nil - } - +func getAuthCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { // First look for a multi-auth header (i.e., a map). 
authConfigs, err := multiAuthHeader(r) if err == nil { @@ -51,38 +129,75 @@ func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { return conf, "", nil } -// Header returns a map with the XRegistryAuthHeader set which can +// Header builds the requested Authentication Header +func Header(sys *types.SystemContext, headerName HeaderAuthName, authfile, username, password string) (map[string]string, error) { + var ( + content string + err error + ) + switch headerName { + case XRegistryAuthHeader: + content, err = headerAuth(sys, authfile, username, password) + case XRegistryConfigHeader: + content, err = headerConfig(sys, authfile, username, password) + default: + err = fmt.Errorf("unsupported authentication header: %q", headerName) + } + if err != nil { + return nil, err + } + + if len(content) > 0 { + return map[string]string{string(headerName): content}, nil + } + return nil, nil +} + +// headerConfig returns a map with the XRegistryConfigHeader set which can // conveniently be used in the http stack. -func Header(sys *types.SystemContext, authfile, username, password string) (map[string]string, error) { - var content string - var err error +func headerConfig(sys *types.SystemContext, authfile, username, password string) (string, error) { + if sys == nil { + sys = &types.SystemContext{} + } + if authfile != "" { + sys.AuthFilePath = authfile + } + authConfigs, err := imageAuth.GetAllCredentials(sys) + if err != nil { + return "", err + } if username != "" { - content, err = encodeSingleAuthConfig(types.DockerAuthConfig{Username: username, Password: password}) - if err != nil { - return nil, err - } - } else { - if sys == nil { - sys = &types.SystemContext{} - } - if authfile != "" { - sys.AuthFilePath = authfile - } - authConfigs, err := imageAuth.GetAllCredentials(sys) - if err != nil { - return nil, err - } - content, err = encodeMultiAuthConfigs(authConfigs) - if err != nil { - return nil, err + authConfigs[""] = types.DockerAuthConfig{ + Username: username, + Password: password, } } - header := make(map[string]string) - header[XRegistryAuthHeader] = content + if len(authConfigs) == 0 { + return "", nil + } + return encodeMultiAuthConfigs(authConfigs) +} - return header, nil +// headerAuth returns a base64 encoded map with the XRegistryAuthHeader set which can +// conveniently be used in the http stack. +func headerAuth(sys *types.SystemContext, authfile, username, password string) (string, error) { + if username != "" { + return encodeSingleAuthConfig(types.DockerAuthConfig{Username: username, Password: password}) + } + + if sys == nil { + sys = &types.SystemContext{} + } + if authfile != "" { + sys.AuthFilePath = authfile + } + authConfigs, err := imageAuth.GetAllCredentials(sys) + if err != nil { + return "", err + } + return encodeMultiAuthConfigs(authConfigs) } // RemoveAuthfile is a convenience function that is meant to be called in a @@ -180,7 +295,7 @@ func imageAuthToDockerAuth(authConfig types.DockerAuthConfig) dockerAPITypes.Aut // singleAuthHeader extracts a DockerAuthConfig from the request's header. // The header content is a single DockerAuthConfig. 
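A sketch of the handler-side flow for the new four-value GetCredentials signature, following the pattern used by the play/kube handler earlier in this patch (the handler name is illustrative):

package handlers

import (
	"net/http"

	"github.com/containers/podman/v2/pkg/api/handlers/utils"
	"github.com/containers/podman/v2/pkg/auth"
	"github.com/pkg/errors"
)

func pullHandler(w http.ResponseWriter, r *http.Request) {
	authConf, authfile, key, err := auth.GetCredentials(r)
	if err != nil {
		// key names whichever X-Registry-* header failed to parse
		utils.Error(w, "Failed to retrieve repository credentials", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header", key))
		return
	}
	// authfile is a temporary file when a multi-registry header was supplied
	defer auth.RemoveAuthfile(authfile)

	_ = authConf // single-registry credentials, if any, are passed on to the image operation
	// ... perform the pull/build using authConf and/or authfile ...
}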
func singleAuthHeader(r *http.Request) (map[string]types.DockerAuthConfig, error) { - authHeader := r.Header.Get(XRegistryAuthHeader) + authHeader := r.Header.Get(string(XRegistryAuthHeader)) authConfig := dockerAPITypes.AuthConfig{} if len(authHeader) > 0 { authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authHeader)) @@ -196,7 +311,7 @@ func singleAuthHeader(r *http.Request) (map[string]types.DockerAuthConfig, error // multiAuthHeader extracts a DockerAuthConfig from the request's header. // The header content is a map[string]DockerAuthConfigs. func multiAuthHeader(r *http.Request) (map[string]types.DockerAuthConfig, error) { - authHeader := r.Header.Get(XRegistryAuthHeader) + authHeader := r.Header.Get(string(XRegistryAuthHeader)) if len(authHeader) == 0 { return nil, nil } diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go index ae5610b0f..14f306910 100644 --- a/pkg/bindings/bindings.go +++ b/pkg/bindings/bindings.go @@ -22,5 +22,5 @@ var ( PFalse = &pFalse // APIVersion - podman will fail to run if this value is wrong - APIVersion = semver.MustParse("1.0.0") + APIVersion = semver.MustParse("2.0.0") ) diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go index 981912665..46e4df1d2 100644 --- a/pkg/bindings/containers/containers.go +++ b/pkg/bindings/containers/containers.go @@ -197,7 +197,56 @@ func Start(ctx context.Context, nameOrID string, detachKeys *string) error { return response.Process(nil) } -func Stats() {} +func Stats(ctx context.Context, containers []string, stream *bool) (chan entities.ContainerStatsReport, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + if stream != nil { + params.Set("stream", strconv.FormatBool(*stream)) + } + for _, c := range containers { + params.Add("containers", c) + } + + response, err := conn.DoRequest(nil, http.MethodGet, "/containers/stats", params, nil) + if err != nil { + return nil, err + } + + statsChan := make(chan entities.ContainerStatsReport) + + go func() { + defer close(statsChan) + + dec := json.NewDecoder(response.Body) + doStream := true + if stream != nil { + doStream = *stream + } + + streamLabel: // label to flatten the scope + select { + case <-response.Request.Context().Done(): + return // lost connection - maybe the server quit + default: + // fall through and do some work + } + var report entities.ContainerStatsReport + if err := dec.Decode(&report); err != nil { + report = entities.ContainerStatsReport{Error: err} + } + statsChan <- report + + if report.Error != nil || !doStream { + return + } + goto streamLabel + }() + + return statsChan, nil +} // Top gathers statistics about the running processes in a container. The nameOrID can be a container name // or a partial/full ID. The descriptors allow for specifying which data to collect from the process. 
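The new streaming Stats binding above returns a channel of reports; a minimal client sketch follows (the socket URI and container name are placeholders, and bindings.NewConnection is assumed to be the usual way to obtain a connection context):

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/containers"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	stream := false // one-shot sample; set to true to keep streaming
	reports, err := containers.Stats(ctx, []string{"mycontainer"}, &stream)
	if err != nil {
		panic(err)
	}

	for report := range reports { // channel is closed by the binding when the stream ends
		if report.Error != nil {
			fmt.Println("stats error:", report.Error)
			return
		}
		for _, s := range report.Stats {
			fmt.Printf("%+v\n", s) // one define.ContainerStats entry per container
		}
	}
}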
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go new file mode 100644 index 000000000..60ffea548 --- /dev/null +++ b/pkg/bindings/images/build.go @@ -0,0 +1,248 @@ +package images + +import ( + "archive/tar" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/containers/buildah" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/docker/go-units" + "github.com/hashicorp/go-multierror" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Build creates an image using a containerfile reference +func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) { + params := url.Values{} + + if t := options.Output; len(t) > 0 { + params.Set("t", t) + } + for _, tag := range options.AdditionalTags { + params.Add("t", tag) + } + if options.Quiet { + params.Set("q", "1") + } + if options.NoCache { + params.Set("nocache", "1") + } + // TODO cachefrom + if options.PullPolicy == buildah.PullAlways { + params.Set("pull", "1") + } + if options.RemoveIntermediateCtrs { + params.Set("rm", "1") + } + if options.ForceRmIntermediateCtrs { + params.Set("forcerm", "1") + } + if mem := options.CommonBuildOpts.Memory; mem > 0 { + params.Set("memory", strconv.Itoa(int(mem))) + } + if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { + params.Set("memswap", strconv.Itoa(int(memSwap))) + } + if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { + params.Set("cpushares", strconv.Itoa(int(cpuShares))) + } + if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 { + params.Set("cpusetcpus", cpuSetCpus) + } + if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 { + params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod))) + } + if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { + params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) + } + if buildArgs := options.Args; len(buildArgs) > 0 { + bArgs, err := jsoniter.MarshalToString(buildArgs) + if err != nil { + return nil, err + } + params.Set("buildargs", bArgs) + } + if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { + shmBytes, err := units.RAMInBytes(shmSize) + if err != nil { + return nil, err + } + params.Set("shmsize", strconv.Itoa(int(shmBytes))) + } + if options.Squash { + params.Set("squash", "1") + } + if labels := options.Labels; len(labels) > 0 { + l, err := jsoniter.MarshalToString(labels) + if err != nil { + return nil, err + } + params.Set("labels", l) + } + if options.CommonBuildOpts.HTTPProxy { + params.Set("httpproxy", "1") + } + + var ( + headers map[string]string + err error + ) + if options.SystemContext == nil { + headers, err = auth.Header(options.SystemContext, auth.XRegistryConfigHeader, "", "", "") + } else { + if options.SystemContext.DockerAuthConfig != nil { + headers, err = auth.Header(options.SystemContext, auth.XRegistryAuthHeader, options.SystemContext.AuthFilePath, options.SystemContext.DockerAuthConfig.Username, options.SystemContext.DockerAuthConfig.Password) + } else { + headers, err = auth.Header(options.SystemContext, auth.XRegistryConfigHeader, options.SystemContext.AuthFilePath, "", "") + } + } + if err != nil { + return nil, err + } + + stdout := io.Writer(os.Stdout) + if options.Out != nil { + stdout = options.Out + 
} + + // TODO network? + + var platform string + if OS := options.OS; len(OS) > 0 { + platform += OS + } + if arch := options.Architecture; len(arch) > 0 { + platform += "/" + arch + } + if len(platform) > 0 { + params.Set("platform", platform) + } + + entries := make([]string, len(containerFiles)) + copy(entries, containerFiles) + entries = append(entries, options.ContextDirectory) + tarfile, err := nTar(entries...) + if err != nil { + return nil, err + } + defer tarfile.Close() + params.Set("dockerfile", filepath.Base(containerFiles[0])) + + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, headers) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + body := response.Body.(io.Reader) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + t, _ := ioutil.TempFile("", "build_*_client") + defer t.Close() + body = io.TeeReader(response.Body, t) + } + } + } + + dec := json.NewDecoder(body) + re := regexp.MustCompile(`[0-9a-f]{12}`) + + var id string + for { + var s struct { + Stream string `json:"stream,omitempty"` + Error string `json:"error,omitempty"` + } + if err := dec.Decode(&s); err != nil { + if errors.Is(err, io.EOF) { + return &entities.BuildReport{ID: id}, nil + } + s.Error = err.Error() + "\n" + } + + switch { + case s.Stream != "": + stdout.Write([]byte(s.Stream)) + if re.Match([]byte(s.Stream)) { + id = s.Stream + } + case s.Error != "": + return nil, errors.New(s.Error) + default: + return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input") + } + } +} + +func nTar(sources ...string) (io.ReadCloser, error) { + if len(sources) == 0 { + return nil, errors.New("No source(s) provided for build") + } + + pr, pw := io.Pipe() + tw := tar.NewWriter(pw) + + var merr error + go func() { + defer pw.Close() + defer tw.Close() + + for _, src := range sources { + s := src + err := filepath.Walk(s, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() || path == s { + return nil + } + + f, lerr := os.Open(path) + if lerr != nil { + return lerr + } + + name := strings.TrimPrefix(path, s+string(filepath.Separator)) + hdr, lerr := tar.FileInfoHeader(info, name) + if lerr != nil { + f.Close() + return lerr + } + hdr.Name = name + if lerr := tw.WriteHeader(hdr); lerr != nil { + f.Close() + return lerr + } + + _, cerr := io.Copy(tw, f) + f.Close() + return cerr + }) + merr = multierror.Append(merr, err) + } + }() + return pr, merr +} diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go index a80c94025..a78e7f4c6 100644 --- a/pkg/bindings/images/images.go +++ b/pkg/bindings/images/images.go @@ -1,7 +1,6 @@ package images import ( - "bytes" "context" "fmt" "io" @@ -9,14 +8,11 @@ import ( "net/url" "strconv" - "github.com/containers/buildah" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/auth" "github.com/containers/podman/v2/pkg/bindings" "github.com/containers/podman/v2/pkg/domain/entities" - "github.com/docker/go-units" - jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" ) @@ -242,112 +238,6 @@ func Untag(ctx context.Context, nameOrID, tag, repo string) error { return 
response.Process(nil) } -// Build creates an image using a containerfile reference -func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions, tarfile io.Reader) (*entities.BuildReport, error) { - var ( - platform string - report entities.BuildReport - ) - conn, err := bindings.GetClient(ctx) - if err != nil { - return nil, err - } - params := url.Values{} - params.Set("dockerfile", containerFiles[0]) - if t := options.Output; len(t) > 0 { - params.Set("t", t) - } - for _, tag := range options.AdditionalTags { - params.Add("t", tag) - } - // TODO Remote, Quiet - if options.NoCache { - params.Set("nocache", "1") - } - // TODO cachefrom - if options.PullPolicy == buildah.PullAlways { - params.Set("pull", "1") - } - if options.RemoveIntermediateCtrs { - params.Set("rm", "1") - } - if options.ForceRmIntermediateCtrs { - params.Set("forcerm", "1") - } - if mem := options.CommonBuildOpts.Memory; mem > 0 { - params.Set("memory", strconv.Itoa(int(mem))) - } - if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { - params.Set("memswap", strconv.Itoa(int(memSwap))) - } - if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { - params.Set("cpushares", strconv.Itoa(int(cpuShares))) - } - if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 { - params.Set("cpusetcpues", cpuSetCpus) - } - if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 { - params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod))) - } - if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { - params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) - } - if buildArgs := options.Args; len(buildArgs) > 0 { - bArgs, err := jsoniter.MarshalToString(buildArgs) - if err != nil { - return nil, err - } - params.Set("buildargs", bArgs) - } - if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { - shmBytes, err := units.RAMInBytes(shmSize) - if err != nil { - return nil, err - } - params.Set("shmsize", strconv.Itoa(int(shmBytes))) - } - if options.Squash { - params.Set("squash", "1") - } - if labels := options.Labels; len(labels) > 0 { - l, err := jsoniter.MarshalToString(labels) - if err != nil { - return nil, err - } - params.Set("labels", l) - } - - // TODO network? - if OS := options.OS; len(OS) > 0 { - platform += OS - } - if arch := options.Architecture; len(arch) > 0 { - platform += "/" + arch - } - if len(platform) > 0 { - params.Set("platform", platform) - } - // TODO outputs? - - response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil) - if err != nil { - return nil, err - } - var streamReponse []byte - bb := bytes.NewBuffer(streamReponse) - if _, err = io.Copy(bb, response.Body); err != nil { - return nil, err - } - var s struct { - Stream string `json:"stream"` - } - if err := jsoniter.UnmarshalFromString(bb.String(), &s); err != nil { - return nil, err - } - fmt.Print(s.Stream) - return &report, nil -} - // Imports adds the given image to the local image store. This can be done by file and the given reader // or via the url parameter. Additional metadata can be associated with the image by using the changes and // message parameters. The image can also be tagged given a reference. One of url OR r must be provided. @@ -380,51 +270,6 @@ func Import(ctx context.Context, changes []string, message, reference, u *string return &report, response.Process(&report) } -// Pull is the binding for libpod's v2 endpoints for pulling images. 
Note that -// `rawImage` must be a reference to a registry (i.e., of docker transport or be -// normalized to one). Other transports are rejected as they do not make sense -// in a remote context. -func Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) ([]string, error) { - conn, err := bindings.GetClient(ctx) - if err != nil { - return nil, err - } - params := url.Values{} - params.Set("reference", rawImage) - params.Set("overrideArch", options.OverrideArch) - params.Set("overrideOS", options.OverrideOS) - params.Set("overrideVariant", options.OverrideVariant) - if options.SkipTLSVerify != types.OptionalBoolUndefined { - // Note: we have to verify if skipped is false. - verifyTLS := bool(options.SkipTLSVerify == types.OptionalBoolFalse) - params.Set("tlsVerify", strconv.FormatBool(verifyTLS)) - } - params.Set("allTags", strconv.FormatBool(options.AllTags)) - - // TODO: have a global system context we can pass around (1st argument) - header, err := auth.Header(nil, options.Authfile, options.Username, options.Password) - if err != nil { - return nil, err - } - - response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header) - if err != nil { - return nil, err - } - - reports := []handlers.LibpodImagesPullReport{} - if err := response.Process(&reports); err != nil { - return nil, err - } - - pulledImages := []string{} - for _, r := range reports { - pulledImages = append(pulledImages, r.ID) - } - - return pulledImages, nil -} - // Push is the binding for libpod's v2 endpoints for push images. Note that // `source` must be a referring to an image in the remote's container storage. // The destination must be a reference to a registry (i.e., of docker transport @@ -437,7 +282,7 @@ func Push(ctx context.Context, source string, destination string, options entiti } // TODO: have a global system context we can pass around (1st argument) - header, err := auth.Header(nil, options.Authfile, options.Username, options.Password) + header, err := auth.Header(nil, auth.XRegistryAuthHeader, options.Authfile, options.Username, options.Password) if err != nil { return err } @@ -480,7 +325,7 @@ func Search(ctx context.Context, term string, opts entities.ImageSearchOptions) } // TODO: have a global system context we can pass around (1st argument) - header, err := auth.Header(nil, opts.Authfile, "", "") + header, err := auth.Header(nil, auth.XRegistryAuthHeader, opts.Authfile, "", "") if err != nil { return nil, err } diff --git a/pkg/bindings/images/pull.go b/pkg/bindings/images/pull.go new file mode 100644 index 000000000..c827b3283 --- /dev/null +++ b/pkg/bindings/images/pull.go @@ -0,0 +1,99 @@ +package images + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + + "github.com/containers/image/v5/types" + "github.com/containers/podman/v2/pkg/auth" + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/hashicorp/go-multierror" +) + +// Pull is the binding for libpod's v2 endpoints for pulling images. Note that +// `rawImage` must be a reference to a registry (i.e., of docker transport or be +// normalized to one). Other transports are rejected as they do not make sense +// in a remote context. 
Progress reported on stderr +func Pull(ctx context.Context, rawImage string, options entities.ImagePullOptions) ([]string, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + params.Set("reference", rawImage) + params.Set("overrideArch", options.OverrideArch) + params.Set("overrideOS", options.OverrideOS) + params.Set("overrideVariant", options.OverrideVariant) + + if options.SkipTLSVerify != types.OptionalBoolUndefined { + // Note: we have to verify if skipped is false. + verifyTLS := bool(options.SkipTLSVerify == types.OptionalBoolFalse) + params.Set("tlsVerify", strconv.FormatBool(verifyTLS)) + } + params.Set("allTags", strconv.FormatBool(options.AllTags)) + + // TODO: have a global system context we can pass around (1st argument) + header, err := auth.Header(nil, auth.XRegistryAuthHeader, options.Authfile, options.Username, options.Password) + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + // Historically pull writes status to stderr + stderr := io.Writer(os.Stderr) + if options.Quiet { + stderr = ioutil.Discard + } + + dec := json.NewDecoder(response.Body) + var images []string + var mErr error + for { + var report entities.ImagePullReport + if err := dec.Decode(&report); err != nil { + if errors.Is(err, io.EOF) { + break + } + report.Error = err.Error() + "\n" + } + + select { + case <-response.Request.Context().Done(): + return images, mErr + default: + // non-blocking select + } + + switch { + case report.Stream != "": + fmt.Fprint(stderr, report.Stream) + case report.Error != "": + mErr = multierror.Append(mErr, errors.New(report.Error)) + case len(report.Images) > 0: + images = report.Images + case report.ID != "": + default: + return images, errors.New("failed to parse pull results stream, unexpected input") + } + + } + return images, mErr +} diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go index d8dc7e352..151d15d3e 100644 --- a/pkg/bindings/network/network.go +++ b/pkg/bindings/network/network.go @@ -60,7 +60,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) ([]*entities.Netw } params := url.Values{} if force != nil { - params.Set("size", strconv.FormatBool(*force)) + params.Set("force", strconv.FormatBool(*force)) } response, err := conn.DoRequest(nil, http.MethodDelete, "/networks/%s", params, nil, nameOrID) if err != nil { diff --git a/pkg/bindings/play/play.go b/pkg/bindings/play/play.go index 32f9bb4a9..ffaee3208 100644 --- a/pkg/bindings/play/play.go +++ b/pkg/bindings/play/play.go @@ -33,7 +33,7 @@ func Kube(ctx context.Context, path string, options entities.PlayKubeOptions) (* } // TODO: have a global system context we can pass around (1st argument) - header, err := auth.Header(nil, options.Authfile, options.Username, options.Password) + header, err := auth.Header(nil, auth.XRegistryAuthHeader, options.Authfile, options.Username, options.Password) if err != nil { return nil, err } diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go index e995770ba..1203f5c3c 100644 --- a/pkg/bindings/system/system.go +++ b/pkg/bindings/system/system.go @@ -118,10 +118,10 @@ func Version(ctx context.Context) (*entities.SystemVersionReport, error) { if err = response.Process(&component); err != nil { return nil, err } - f, _ := 
strconv.ParseFloat(component.APIVersion, 64) + b, _ := time.Parse(time.RFC3339, component.BuildTime) report.Server = &define.Version{ - APIVersion: int64(f), + APIVersion: component.APIVersion, Version: component.Version.Version, GoVersion: component.GoVersion, GitCommit: component.GitCommit, @@ -129,6 +129,12 @@ func Version(ctx context.Context) (*entities.SystemVersionReport, error) { Built: b.Unix(), OsArch: fmt.Sprintf("%s/%s", component.Os, component.Arch), } + + for _, c := range component.Components { + if c.Name == "Podman Engine" { + report.Server.APIVersion = c.Details["APIVersion"] + } + } return &report, err } diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go index e0dd28d7a..681855293 100644 --- a/pkg/bindings/test/images_test.go +++ b/pkg/bindings/test/images_test.go @@ -360,19 +360,19 @@ var _ = Describe("Podman images", func() { rawImage := "docker.io/library/busybox:latest" pulledImages, err := images.Pull(bt.conn, rawImage, entities.ImagePullOptions{}) - Expect(err).To(BeNil()) + Expect(err).NotTo(HaveOccurred()) Expect(len(pulledImages)).To(Equal(1)) exists, err := images.Exists(bt.conn, rawImage) - Expect(err).To(BeNil()) + Expect(err).NotTo(HaveOccurred()) Expect(exists).To(BeTrue()) // Make sure the normalization AND the full-transport reference works. _, err = images.Pull(bt.conn, "docker://"+rawImage, entities.ImagePullOptions{}) - Expect(err).To(BeNil()) + Expect(err).NotTo(HaveOccurred()) // The v2 endpoint only supports the docker transport. Let's see if that's really true. _, err = images.Pull(bt.conn, "bogus-transport:bogus.com/image:reference", entities.ImagePullOptions{}) - Expect(err).To(Not(BeNil())) + Expect(err).To(HaveOccurred()) }) }) diff --git a/pkg/channel/doc.go b/pkg/channel/doc.go new file mode 100644 index 000000000..656fbddaa --- /dev/null +++ b/pkg/channel/doc.go @@ -0,0 +1,17 @@ +/* +Package channel provides helper structs/methods/funcs for working with channels + +Proxy from an io.Writer to a channel: + + w := channel.NewWriter(make(chan []byte, 10)) + go func() { + w.Write([]byte("Hello, World")) + }() + + fmt.Println(string(<-w.Chan())) + w.Close() + +Use of the constructor is required to initialize the channel. +Provide a channel of sufficient size to handle messages from writer(s). +*/ +package channel diff --git a/pkg/channel/writer.go b/pkg/channel/writer.go new file mode 100644 index 000000000..dbb38e416 --- /dev/null +++ b/pkg/channel/writer.go @@ -0,0 +1,53 @@ +package channel + +import ( + "io" + "sync" + + "github.com/pkg/errors" +) + +// WriteCloser is an io.WriteCloser that that proxies Write() calls to a channel +// The []byte buffer of the Write() is queued on the channel as one message. 
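Complementing the doc.go example above, a small sketch of the intended drain pattern: one goroutine consumes Chan() while another side writes through the io.WriteCloser (the output destination is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/containers/podman/v2/pkg/channel"
)

func main() {
	w := channel.NewWriter(make(chan []byte, 10))
	done := make(chan struct{})

	go func() {
		for msg := range w.Chan() { // Chan() is drained until Close() closes it
			os.Stdout.Write(msg)
		}
		close(done)
	}()

	fmt.Fprintln(w, "hello from the writer side")
	w.Close()
	<-done
}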
+type WriteCloser interface { + io.WriteCloser + Chan() <-chan []byte +} + +type writeCloser struct { + ch chan []byte + mux sync.Mutex +} + +// NewWriter initializes a new channel writer +func NewWriter(c chan []byte) WriteCloser { + return &writeCloser{ + ch: c, + } +} + +// Chan returns the R/O channel behind WriteCloser +func (w *writeCloser) Chan() <-chan []byte { + return w.ch +} + +// Write method for WriteCloser +func (w *writeCloser) Write(b []byte) (int, error) { + if w == nil || w.ch == nil { + return 0, errors.New("use channel.NewWriter() to initialize a WriteCloser") + } + + w.mux.Lock() + buf := make([]byte, len(b)) + copy(buf, b) + w.ch <- buf + w.mux.Unlock() + + return len(b), nil +} + +// Close method for WriteCloser +func (w *writeCloser) Close() error { + close(w.ch) + return nil +} diff --git a/pkg/channelwriter/channelwriter.go b/pkg/channelwriter/channelwriter.go deleted file mode 100644 index d51400eb3..000000000 --- a/pkg/channelwriter/channelwriter.go +++ /dev/null @@ -1,34 +0,0 @@ -package channelwriter - -import "github.com/pkg/errors" - -// Writer is an io.writer-like object that "writes" to a channel -// instead of a buffer or file, etc. It is handy for varlink endpoints when -// needing to handle endpoints that do logging "real-time" -type Writer struct { - ByteChannel chan []byte -} - -// NewChannelWriter creates a new channel writer and adds a -// byte slice channel into it. -func NewChannelWriter() *Writer { - byteChannel := make(chan []byte) - return &Writer{ - ByteChannel: byteChannel, - } -} - -// Write method for Writer -func (c *Writer) Write(w []byte) (int, error) { - if c.ByteChannel == nil { - return 0, errors.New("channel writer channel cannot be nil") - } - c.ByteChannel <- w - return len(w), nil -} - -// Close method for Writer -func (c *Writer) Close() error { - close(c.ByteChannel) - return nil -} diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go index 16997cdd1..7b272f01e 100644 --- a/pkg/domain/entities/containers.go +++ b/pkg/domain/entities/containers.go @@ -411,10 +411,17 @@ type ContainerCpReport struct { // ContainerStatsOptions describes input options for getting // stats on containers type ContainerStatsOptions struct { - All bool - Format string - Latest bool - NoReset bool - NoStream bool - StatChan chan []*define.ContainerStats + // Operate on the latest known container. Only supported for local + // clients. + Latest bool + // Stream stats. + Stream bool +} + +// ContainerStatsReport is used for streaming container stats. +type ContainerStatsReport struct { + // Error from reading stats. + Error error + // Results, set when there is no error. 
+ Stats []define.ContainerStats } diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go index 6c85c9267..803a59932 100644 --- a/pkg/domain/entities/engine_container.go +++ b/pkg/domain/entities/engine_container.go @@ -38,7 +38,7 @@ type ContainerEngine interface { ContainerRun(ctx context.Context, opts ContainerRunOptions) (*ContainerRunReport, error) ContainerRunlabel(ctx context.Context, label string, image string, args []string, opts ContainerRunlabelOptions) error ContainerStart(ctx context.Context, namesOrIds []string, options ContainerStartOptions) ([]*ContainerStartReport, error) - ContainerStats(ctx context.Context, namesOrIds []string, options ContainerStatsOptions) error + ContainerStats(ctx context.Context, namesOrIds []string, options ContainerStatsOptions) (chan ContainerStatsReport, error) ContainerStop(ctx context.Context, namesOrIds []string, options StopOptions) ([]*StopReport, error) ContainerTop(ctx context.Context, options TopOptions) (*StringSliceReport, error) ContainerUnmount(ctx context.Context, nameOrIDs []string, options ContainerUnmountOptions) ([]*ContainerUnmountReport, error) @@ -78,6 +78,6 @@ type ContainerEngine interface { VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IDOrNameResponse, error) VolumeInspect(ctx context.Context, namesOrIds []string, opts VolumeInspectOptions) ([]*VolumeInspectReport, error) VolumeList(ctx context.Context, opts VolumeListOptions) ([]*VolumeListReport, error) - VolumePrune(ctx context.Context, opts VolumePruneOptions) ([]*VolumePruneReport, error) + VolumePrune(ctx context.Context) ([]*VolumePruneReport, error) VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error) } diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index 2a8133680..ac81c282d 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -3,6 +3,7 @@ package entities import ( "time" + "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/pkg/inspect" @@ -45,7 +46,7 @@ type Image struct { HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"` } -func (i *Image) Id() string { //nolint +func (i *Image) Id() string { // nolint return i.ID } @@ -70,7 +71,7 @@ type ImageSummary struct { History []string `json:",omitempty"` } -func (i *ImageSummary) Id() string { //nolint +func (i *ImageSummary) Id() string { // nolint return i.ID } @@ -119,8 +120,8 @@ type ImageHistoryReport struct { // ImagePullOptions are the arguments for pulling images. type ImagePullOptions struct { - // AllTags can be specified to pull all tags of the spiecifed image. Note - // that this only works if the specified image does not include a tag. + // AllTags can be specified to pull all tags of an image. Note + // that this only works if the image does not include a tag. AllTags bool // Authfile is the path to the authentication file. Ignored for remote // calls. @@ -146,11 +147,20 @@ type ImagePullOptions struct { SignaturePolicy string // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify types.OptionalBool + // PullPolicy whether to pull new image + PullPolicy config.PullPolicy } // ImagePullReport is the response from pulling one or more images. 
type ImagePullReport struct { - Images []string + // Stream used to provide output from c/image + Stream string `json:"stream,omitempty"` + // Error contains text of errors from c/image + Error string `json:"error,omitempty"` + // Images contains the ID's of the images pulled + Images []string `json:"images,omitempty"` + // ID contains image id (retained for backwards compatibility) + ID string `json:"id,omitempty"` } // ImagePushOptions are the arguments for pushing images. @@ -259,16 +269,17 @@ type ImageLoadReport struct { } type ImageImportOptions struct { - Changes []string - Message string - Quiet bool - Reference string - Source string - SourceIsURL bool + Changes []string + Message string + Quiet bool + Reference string + SignaturePolicy string + Source string + SourceIsURL bool } type ImageImportReport struct { - Id string //nolint + Id string // nolint } // ImageSaveOptions provide options for saving images. @@ -348,7 +359,7 @@ type ImageUnmountOptions struct { // ImageMountReport describes the response from image mount type ImageMountReport struct { Err error - Id string //nolint + Id string // nolint Name string Repositories []string Path string @@ -357,5 +368,5 @@ type ImageMountReport struct { // ImageUnmountReport describes the response from umounting an image type ImageUnmountReport struct { Err error - Id string //nolint + Id string // nolint } diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go index 7b38dbd87..426419833 100644 --- a/pkg/domain/entities/pods.go +++ b/pkg/domain/entities/pods.go @@ -142,6 +142,7 @@ func (p PodCreateOptions) ToPodSpecGen(s *specgen.PodSpecGenerator) { s.StaticMAC = p.Net.StaticMAC s.PortMappings = p.Net.PublishPorts s.CNINetworks = p.Net.CNINetworks + s.NetworkOptions = p.Net.NetworkOptions if p.Net.UseImageResolvConf { s.NoManageResolvConf = true } diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go index 53d30ffdf..fb8466d04 100644 --- a/pkg/domain/entities/volumes.go +++ b/pkg/domain/entities/volumes.go @@ -113,10 +113,6 @@ type VolumeInspectReport struct { *VolumeConfigResponse } -type VolumePruneOptions struct { - Force bool -} - type VolumePruneReport struct { Err error Id string //nolint diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 21618f555..8b0d53940 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -1142,12 +1142,12 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) { }) } -func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) error { - defer close(options.StatChan) +func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) (statsChan chan entities.ContainerStatsReport, err error) { + statsChan = make(chan entities.ContainerStatsReport, 1) + containerFunc := ic.Libpod.GetRunningContainers + queryAll := false switch { - case len(namesOrIds) > 0: - containerFunc = func() ([]*libpod.Container, error) { return ic.Libpod.GetContainersByList(namesOrIds) } case options.Latest: containerFunc = func() ([]*libpod.Container, error) { lastCtr, err := ic.Libpod.GetLatestContainer() @@ -1156,62 +1156,76 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri } return []*libpod.Container{lastCtr}, nil } - case options.All: + case len(namesOrIds) > 0: + containerFunc = func() ([]*libpod.Container, error) { return 
ic.Libpod.GetContainersByList(namesOrIds) } + default: + // No containers, no latest -> query all! + queryAll = true containerFunc = ic.Libpod.GetAllContainers } - ctrs, err := containerFunc() - if err != nil { - return errors.Wrapf(err, "unable to get list of containers") - } - containerStats := map[string]*define.ContainerStats{} - for _, ctr := range ctrs { - initialStats, err := ctr.GetContainerStats(&define.ContainerStats{}) - if err != nil { - // when doing "all", don't worry about containers that are not running - cause := errors.Cause(err) - if options.All && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) { - continue - } - if cause == cgroups.ErrCgroupV1Rootless { - err = cause - } - return err + go func() { + defer close(statsChan) + var ( + err error + containers []*libpod.Container + containerStats map[string]*define.ContainerStats + ) + containerStats = make(map[string]*define.ContainerStats) + + stream: // label to flatten the scope + select { + case <-ctx.Done(): + // client cancelled + logrus.Debugf("Container stats stopped: context cancelled") + return + default: + // just fall through and do work } - containerStats[ctr.ID()] = initialStats - } - for { - reportStats := []*define.ContainerStats{} - for _, ctr := range ctrs { - id := ctr.ID() - if _, ok := containerStats[ctr.ID()]; !ok { - initialStats, err := ctr.GetContainerStats(&define.ContainerStats{}) - if errors.Cause(err) == define.ErrCtrRemoved || errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrStateInvalid { - // skip dealing with a container that is gone - continue + + // Anonymous func to easily use the return values for streaming. + computeStats := func() ([]define.ContainerStats, error) { + containers, err = containerFunc() + if err != nil { + return nil, errors.Wrapf(err, "unable to get list of containers") + } + + reportStats := []define.ContainerStats{} + for _, ctr := range containers { + prev, ok := containerStats[ctr.ID()] + if !ok { + prev = &define.ContainerStats{} } + + stats, err := ctr.GetContainerStats(prev) if err != nil { - return err + cause := errors.Cause(err) + if queryAll && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) { + continue + } + if cause == cgroups.ErrCgroupV1Rootless { + err = cause + } + return nil, err } - containerStats[id] = initialStats - } - stats, err := ctr.GetContainerStats(containerStats[id]) - if err != nil && errors.Cause(err) != define.ErrNoSuchCtr { - return err + + containerStats[ctr.ID()] = stats + reportStats = append(reportStats, *stats) } - // replace the previous measurement with the current one - containerStats[id] = stats - reportStats = append(reportStats, stats) + return reportStats, nil } - ctrs, err = containerFunc() - if err != nil { - return err - } - options.StatChan <- reportStats - if options.NoStream { - break + + report := entities.ContainerStatsReport{} + report.Stats, report.Error = computeStats() + statsChan <- report + + if report.Error != nil || !options.Stream { + return } + time.Sleep(time.Second) - } - return nil + goto stream + }() + + return statsChan, nil } diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 23aef9573..d56dc7d94 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -191,6 +191,15 @@ func (ir *ImageEngine) Unmount(ctx context.Context, nameOrIDs []string, options reports := []*entities.ImageUnmountReport{} for _, img := range 
images { report := entities.ImageUnmountReport{Id: img.ID()} + mounted, _, err := img.Mounted() + if err != nil { + // Errors will be caught in Unmount call below + // Default assumption to mounted + mounted = true + } + if !mounted { + continue + } if err := img.Unmount(options.Force); err != nil { if options.All && errors.Cause(err) == storage.ErrLayerNotMounted { logrus.Debugf("Error umounting image %s, storage.ErrLayerNotMounted", img.ID()) @@ -246,7 +255,7 @@ func pull(ctx context.Context, runtime *image.Runtime, rawImage string, options } if !options.AllTags { - newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, util.PullImageAlways) + newImage, err := runtime.New(ctx, rawImage, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, label, options.PullPolicy) if err != nil { return nil, err } @@ -467,7 +476,7 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) } func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) { - id, err := ir.Libpod.Import(ctx, opts.Source, opts.Reference, opts.Changes, opts.Message, opts.Quiet) + id, err := ir.Libpod.Import(ctx, opts.Source, opts.Reference, opts.SignaturePolicy, opts.Changes, opts.Message, opts.Quiet) if err != nil { return nil, err } @@ -535,6 +544,7 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) { } func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { + id, _, err := ir.Libpod.Build(ctx, opts.BuildOptions, containerFiles...) if err != nil { return nil, err diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go index 7ec84246d..3e47dc67a 100644 --- a/pkg/domain/infra/abi/images_list.go +++ b/pkg/domain/infra/abi/images_list.go @@ -23,33 +23,13 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) summaries := []*entities.ImageSummary{} for _, img := range images { - var repoTags []string - if opts.All { - pairs, err := libpodImage.ReposToMap(img.Names()) - if err != nil { - return nil, err - } - - for repo, tags := range pairs { - for _, tag := range tags { - repoTags = append(repoTags, repo+":"+tag) - } - } - } else { - repoTags, err = img.RepoTags() - if err != nil { - return nil, err - } - } - digests := make([]string, len(img.Digests())) for j, d := range img.Digests() { digests[j] = string(d) } e := entities.ImageSummary{ - ID: img.ID(), - + ID: img.ID(), ConfigDigest: string(img.ConfigDigest), Created: img.Created().Unix(), Dangling: img.Dangling(), @@ -61,7 +41,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ReadOnly: img.IsReadOnly(), SharedSize: 0, VirtualSize: img.VirtualSize, - RepoTags: repoTags, + RepoTags: img.Names(), // may include tags and digests } e.Labels, _ = img.Labels(context.TODO()) diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go index 672d0a69f..6c518e678 100644 --- a/pkg/domain/infra/abi/manifest.go +++ b/pkg/domain/infra/abi/manifest.go @@ -3,6 +3,7 @@ package abi import ( + "bytes" "context" "encoding/json" "fmt" @@ -11,15 +12,17 @@ import ( "strings" "github.com/containers/buildah/manifests" + buildahManifests "github.com/containers/buildah/pkg/manifests" + "github.com/containers/buildah/util" buildahUtil 
"github.com/containers/buildah/util" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" libpodImage "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/pkg/domain/entities" - "github.com/containers/podman/v2/pkg/util" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -41,28 +44,82 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, names, images []strin // ManifestInspect returns the content of a manifest list or image func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string) ([]byte, error) { - dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name()) - _, err := alltransports.ParseImageName(name) + if newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(name); err == nil { + // return the manifest in local storage + if list, err := newImage.InspectManifest(); err == nil { + buf, err := json.MarshalIndent(list, "", " ") + if err != nil { + return buf, errors.Wrapf(err, "error rendering manifest %s for display", name) + } + return buf, nil + // no return if local image is not a list of images type + // continue on getting valid manifest through remote serice + } else if errors.Cause(err) != buildahManifests.ErrManifestTypeNotSupported { + return nil, errors.Wrapf(err, "loading manifest %q", name) + } + } + sc := ir.Libpod.SystemContext() + refs, err := util.ResolveNameToReferences(ir.Libpod.GetStore(), sc, name) if err != nil { - _, err = alltransports.ParseImageName(dockerPrefix + name) + return nil, err + } + var ( + latestErr error + result []byte + manType string + b bytes.Buffer + ) + appendErr := func(e error) { + if latestErr == nil { + latestErr = e + } else { + latestErr = errors.Wrapf(latestErr, "tried %v\n", e) + } + } + for _, ref := range refs { + src, err := ref.NewImageSource(ctx, sc) + if err != nil { + appendErr(errors.Wrapf(err, "reading image %q", transports.ImageName(ref))) + continue + } + defer src.Close() + + manifestBytes, manifestType, err := src.GetManifest(ctx, nil) if err != nil { - return nil, errors.Errorf("invalid image reference %q", name) + appendErr(errors.Wrapf(err, "loading manifest %q", transports.ImageName(ref))) + continue } + + if !manifest.MIMETypeIsMultiImage(manifestType) { + appendErr(errors.Errorf("manifest is of type %s (not a list type)", manifestType)) + continue + } + result = manifestBytes + manType = manifestType + break } - image, err := ir.Libpod.ImageRuntime().New(ctx, name, "", "", nil, nil, libpodImage.SigningOptions{}, nil, util.PullImageMissing) - if err != nil { - return nil, errors.Wrapf(err, "reading image %q", name) + if len(result) == 0 && latestErr != nil { + return nil, latestErr } + if manType != manifest.DockerV2ListMediaType { + listBlob, err := manifest.ListFromBlob(result, manType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(result), manType) + } + list, err := listBlob.ConvertToMIMEType(manifest.DockerV2ListMediaType) + if err != nil { + return nil, err + } + if result, err = list.Serialize(); err != nil { + return nil, err + } - list, err := image.InspectManifest() - if err != nil { - return nil, errors.Wrapf(err, "loading manifest %q", name) } - buf, err := json.MarshalIndent(list, "", " ") + err = json.Indent(&b, result, "", " ") if err != nil 
{ - return buf, errors.Wrapf(err, "error rendering manifest for display") + return nil, errors.Wrapf(err, "error rendering manifest %s for display", name) } - return buf, nil + return b.Bytes(), nil } // ManifestAdd adds images to the manifest list diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go index 807e4b272..5acfea853 100644 --- a/pkg/domain/infra/abi/network.go +++ b/pkg/domain/infra/abi/network.go @@ -12,6 +12,7 @@ import ( "github.com/containernetworking/cni/libcni" cniversion "github.com/containernetworking/cni/pkg/version" "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/network" "github.com/containers/podman/v2/pkg/util" @@ -82,12 +83,21 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o // We need to iterate containers looking to see if they belong to the given network for _, c := range containers { if util.StringInSlice(name, c.Config().Networks) { - // if user passes force, we nuke containers + // if user passes force, we nuke containers and pods if !options.Force { // Without the force option, we return an error - return reports, errors.Errorf("%q has associated containers with it. Use -f to forcibly delete containers", name) + return reports, errors.Wrapf(define.ErrNetworkInUse, "%q has associated containers with it. Use -f to forcibly delete containers and pods", name) } - if err := ic.Libpod.RemoveContainer(ctx, c, true, true); err != nil { + if c.IsInfra() { + // if we have a infra container we need to remove the pod + pod, err := ic.Libpod.GetPod(c.PodID()) + if err != nil { + return reports, err + } + if err := ic.Libpod.RemovePod(ctx, pod, true, true); err != nil { + return reports, err + } + } else if err := ic.Libpod.RemoveContainer(ctx, c, true, true); err != nil { return reports, err } } diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go index 6dfb52c63..aa6aeede2 100644 --- a/pkg/domain/infra/abi/play.go +++ b/pkg/domain/infra/abi/play.go @@ -132,6 +132,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY libpod.WithInfraContainer(), libpod.WithPodName(podName), } + + if podYAML.ObjectMeta.Labels != nil { + podOptions = append(podOptions, libpod.WithPodLabels(podYAML.ObjectMeta.Labels)) + } + // TODO we only configure Process namespace. 
We also need to account for Host{IPC,Network,PID} // which is not currently possible with pod create if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace { @@ -294,6 +299,18 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY return nil, err } + var ctrRestartPolicy string + switch podYAML.Spec.RestartPolicy { + case v1.RestartPolicyAlways: + ctrRestartPolicy = libpod.RestartPolicyAlways + case v1.RestartPolicyOnFailure: + ctrRestartPolicy = libpod.RestartPolicyOnFailure + case v1.RestartPolicyNever: + ctrRestartPolicy = libpod.RestartPolicyNo + default: // Default to Always + ctrRestartPolicy = libpod.RestartPolicyAlways + } + containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers)) for _, container := range podYAML.Spec.Containers { pullPolicy := util.PullImageMissing @@ -321,6 +338,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY if err != nil { return nil, err } + conf.RestartPolicy = ctrRestartPolicy ctr, err := createconfig.CreateContainerFromCreateConfig(ctx, ic.Libpod, conf, pod) if err != nil { return nil, err diff --git a/pkg/domain/infra/abi/system_varlink.go b/pkg/domain/infra/abi/system_varlink.go index d0a5c5407..ead84fc84 100644 --- a/pkg/domain/infra/abi/system_varlink.go +++ b/pkg/domain/infra/abi/system_varlink.go @@ -22,7 +22,7 @@ func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.Servi service, err := varlink.NewService( "Atomic", "podman", - version.Version, + version.Version.String(), "https://github.com/containers/podman", ) if err != nil { diff --git a/pkg/domain/infra/abi/terminal/terminal.go b/pkg/domain/infra/abi/terminal/terminal.go index 0b6e57f49..48f5749d5 100644 --- a/pkg/domain/infra/abi/terminal/terminal.go +++ b/pkg/domain/infra/abi/terminal/terminal.go @@ -6,7 +6,7 @@ import ( "os/signal" lsignal "github.com/containers/podman/v2/pkg/signal" - "github.com/docker/docker/pkg/term" + "github.com/moby/term" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/client-go/tools/remotecommand" diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go index 340f00953..946f258af 100644 --- a/pkg/domain/infra/abi/volumes.go +++ b/pkg/domain/infra/abi/volumes.go @@ -120,7 +120,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin return reports, nil } -func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) { +func (ic *ContainerEngine) VolumePrune(ctx context.Context) ([]*entities.VolumePruneReport, error) { return ic.pruneVolumesHelper(ctx) } diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go index f9b8106ef..26c9c7e2e 100644 --- a/pkg/domain/infra/runtime_libpod.go +++ b/pkg/domain/infra/runtime_libpod.go @@ -227,23 +227,6 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo // TODO flag to set CNI plugins dir? - // TODO I don't think these belong here? 
- // Will follow up with a different PR to address - // - // Pod create options - - infraImageFlag := fs.Lookup("infra-image") - if infraImageFlag != nil && infraImageFlag.Changed { - infraImage, _ := fs.GetString("infra-image") - options = append(options, libpod.WithDefaultInfraImage(infraImage)) - } - - infraCommandFlag := fs.Lookup("infra-command") - if infraCommandFlag != nil && infraImageFlag.Changed { - infraCommand, _ := fs.GetString("infra-command") - options = append(options, libpod.WithDefaultInfraCommand(infraCommand)) - } - if !opts.withFDS { options = append(options, libpod.WithEnableSDNotify()) } diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 35550b9be..9b03503c6 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -35,7 +35,7 @@ func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string) } func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) { - cons, err := getContainersByContext(ic.ClientCxt, false, namesOrIds) + cons, err := getContainersByContext(ic.ClientCxt, false, false, namesOrIds) if err != nil { return nil, err } @@ -54,7 +54,7 @@ func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []strin } func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) { - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -67,7 +67,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri } func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) { - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -89,8 +89,8 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin id := strings.Split(string(content), "\n")[0] namesOrIds = append(namesOrIds, id) } - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) - if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, options.Ignore, namesOrIds) + if err != nil { return nil, err } for _, c := range ctrs { @@ -120,7 +120,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin } func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) { - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st timeout = &t } - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -169,8 +169,8 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, id := 
strings.Split(string(content), "\n")[0] namesOrIds = append(namesOrIds, id) } - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) - if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) { + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, options.Ignore, namesOrIds) + if err != nil { return nil, err } // TODO there is no endpoint for container eviction. Need to discuss @@ -283,7 +283,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ ) if options.All { - allCtrs, err := getContainersByContext(ic.ClientCxt, true, []string{}) + allCtrs, err := getContainersByContext(ic.ClientCxt, true, false, []string{}) if err != nil { return nil, err } @@ -295,7 +295,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ } } else { - ctrs, err = getContainersByContext(ic.ClientCxt, false, namesOrIds) + ctrs, err = getContainersByContext(ic.ClientCxt, false, false, namesOrIds) if err != nil { return nil, err } @@ -317,7 +317,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st ctrs = []entities.ListContainer{} ) if options.All { - allCtrs, err := getContainersByContext(ic.ClientCxt, true, []string{}) + allCtrs, err := getContainersByContext(ic.ClientCxt, true, false, []string{}) if err != nil { return nil, err } @@ -329,7 +329,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st } } else { - ctrs, err = getContainersByContext(ic.ClientCxt, false, namesOrIds) + ctrs, err = getContainersByContext(ic.ClientCxt, false, false, namesOrIds) if err != nil { return nil, err } @@ -389,6 +389,15 @@ func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIDs []string, } func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string, options entities.AttachOptions) error { + ctrs, err := getContainersByContext(ic.ClientCxt, false, false, []string{nameOrID}) + if err != nil { + return err + } + ctr := ctrs[0] + if ctr.State != define.ContainerStateRunning.String() { + return errors.Errorf("you can only attach to running containers") + } + return containers.Attach(ic.ClientCxt, nameOrID, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil) } @@ -472,27 +481,67 @@ func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { reports := []*entities.ContainerStartReport{} - for _, name := range namesOrIds { + var exitCode = define.ExecErrorCodeGeneric + ctrs, err := getContainersByContext(ic.ClientCxt, false, false, namesOrIds) + if err != nil { + return nil, err + } + // There can only be one container if attach was used + for i, ctr := range ctrs { + name := ctr.ID report := entities.ContainerStartReport{ Id: name, - RawInput: name, - ExitCode: 125, + RawInput: namesOrIds[i], + ExitCode: exitCode, } + ctrRunning := ctr.State == define.ContainerStateRunning.String() if options.Attach { - report.Err = startAndAttach(ic, name, &options.DetachKeys, options.Stdin, options.Stdout, options.Stderr) - if report.Err == nil { - exitCode, err := containers.Wait(ic.ClientCxt, name, nil) - if err == nil { - report.ExitCode = int(exitCode) + err = startAndAttach(ic, name, &options.DetachKeys, options.Stdin, options.Stdout, options.Stderr) + if err == define.ErrDetach { + // User manually 
detached + // Exit cleanly immediately + report.Err = err + reports = append(reports, &report) + return reports, nil + } + if ctrRunning { + reports = append(reports, &report) + return reports, nil + } + + if err != nil { + report.ExitCode = define.ExitCode(report.Err) + report.Err = err + reports = append(reports, &report) + return reports, errors.Wrapf(report.Err, "unable to start container %s", name) + } + exitCode, err := containers.Wait(ic.ClientCxt, name, nil) + if err == define.ErrNoSuchCtr { + // Check events + event, err := ic.GetLastContainerEvent(ctx, name, events.Exited) + if err != nil { + logrus.Errorf("Cannot get exit code: %v", err) + report.ExitCode = define.ExecErrorCodeNotFound + } else { + report.ExitCode = event.ContainerExitCode } } else { - report.ExitCode = define.ExitCode(report.Err) + report.ExitCode = int(exitCode) } reports = append(reports, &report) return reports, nil } - report.Err = containers.Start(ic.ClientCxt, name, &options.DetachKeys) - report.ExitCode = define.ExitCode(report.Err) + // Start the container if it's not running already. + if !ctrRunning { + err = containers.Start(ic.ClientCxt, name, &options.DetachKeys) + if err != nil { + report.Err = errors.Wrapf(err, "unable to start container %q", name) + report.ExitCode = define.ExitCode(err) + reports = append(reports, &report) + continue + } + } + report.ExitCode = 0 reports = append(reports, &report) } return reports, nil @@ -607,7 +656,7 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st } func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []string, options entities.ContainerInitOptions) ([]*entities.ContainerInitReport, error) { - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -647,7 +696,7 @@ func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, o if len(nameOrID) > 0 { namesOrIds = append(namesOrIds, nameOrID) } - ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, false, namesOrIds) if err != nil { return nil, err } @@ -673,6 +722,9 @@ func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string, func (ic *ContainerEngine) Shutdown(_ context.Context) { } -func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) error { - return errors.New("not implemented") +func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) (statsChan chan entities.ContainerStatsReport, err error) { + if options.Latest { + return nil, errors.New("latest is not supported for the remote client") + } + return containers.Stats(ic.ClientCxt, namesOrIds, &options.Stream) } diff --git a/pkg/domain/infra/tunnel/events.go b/pkg/domain/infra/tunnel/events.go index e6f4834b9..53bae6cef 100644 --- a/pkg/domain/infra/tunnel/events.go +++ b/pkg/domain/infra/tunnel/events.go @@ -2,8 +2,10 @@ package tunnel import ( "context" + // "fmt" "strings" + "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/pkg/bindings/system" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/pkg/errors" @@ -29,3 +31,33 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio }() return system.Events(ic.ClientCxt, binChan, 
nil, &opts.Since, &opts.Until, filters, &opts.Stream) } + +// GetLastContainerEvent takes a container name or ID and an event status and returns +// the last occurrence of the container event +func (ic *ContainerEngine) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) { + // check to make sure the event.Status is valid + if _, err := events.StringToStatus(containerEvent.String()); err != nil { + return nil, err + } + var event events.Event + return &event, nil + + /* + FIXME: We need new bindings for this section + filters := []string{ + fmt.Sprintf("container=%s", nameOrID), + fmt.Sprintf("event=%s", containerEvent), + "type=container", + } + + containerEvents, err := system.GetEvents(ctx, entities.EventsOptions{Filter: filters}) + if err != nil { + return nil, err + } + if len(containerEvents) < 1 { + return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String()) + } + // return the last element in the slice + return containerEvents[len(containerEvents)-1], nil + */ +} diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index 0c38a3326..5944f855a 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -13,7 +13,7 @@ import ( "github.com/pkg/errors" ) -func getContainersByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]entities.ListContainer, error) { +func getContainersByContext(contextWithConnection context.Context, all, ignore bool, namesOrIDs []string) ([]entities.ListContainer, error) { var ( cons []entities.ListContainer ) @@ -36,7 +36,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam break } } - if !found { + if !found && !ignore { return nil, errors.Wrapf(define.ErrNoSuchCtr, "unable to find container %q", id) } } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 185cc2f9a..61ac2141c 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -1,26 +1,19 @@ package tunnel import ( - "archive/tar" - "bytes" "context" - "io" "io/ioutil" "os" - "path/filepath" "strings" "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" - "github.com/containers/podman/v2/pkg/bindings" images "github.com/containers/podman/v2/pkg/bindings/images" "github.com/containers/podman/v2/pkg/domain/entities" "github.com/containers/podman/v2/pkg/domain/utils" utils2 "github.com/containers/podman/v2/utils" - "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) { @@ -145,13 +138,8 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrID string, tags []string, } func (ir *ImageEngine) Untag(ctx context.Context, nameOrID string, tags []string, options entities.ImageUntagOptions) error { - // Remove all tags if none are provided if len(tags) == 0 { - newImage, err := images.GetImage(ir.ClientCxt, nameOrID, bindings.PFalse) - if err != nil { - return err - } - tags = newImage.NamesHistory + return images.Untag(ir.ClientCxt, nameOrID, "", "") } for _, newTag := range tags { @@ -205,6 +193,13 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) return nil, err } defer f.Close() + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + if fInfo.IsDir() { + return nil, errors.Errorf("remote client 
supports archives only but %q is a directory", opts.Input) + } ref := opts.Name if len(opts.Tag) > 0 { ref += ":" + opts.Tag @@ -311,28 +306,23 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) { return config.Default() } -func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { - var tarReader io.Reader - tarfile, err := archive.Tar(opts.ContextDirectory, 0) - if err != nil { - return nil, err - } - tarReader = tarfile - cwd, err := os.Getwd() +func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { + report, err := images.Build(ir.ClientCxt, containerFiles, opts) if err != nil { return nil, err } - if cwd != opts.ContextDirectory { - fn := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) { - h.Name = filepath.Join(filepath.Base(opts.ContextDirectory), h.Name) - return nil, false, false, nil - } - tarReader, err = transformArchive(tarfile, false, fn) + // For remote clients, if the option for writing to a file was + // selected, we need to write to the *client's* filesystem. + if len(opts.IIDFile) > 0 { + f, err := os.Create(opts.IIDFile) if err != nil { return nil, err } + if _, err := f.WriteString(report.ID); err != nil { + return nil, err + } } - return images.Build(ir.ClientCxt, containerFiles, opts, tarReader) + return report, nil } func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) { @@ -346,65 +336,3 @@ func (ir *ImageEngine) Shutdown(_ context.Context) { func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) { return nil, errors.New("not implemented yet") } - -// Sourced from openshift image builder - -// TransformFileFunc is given a chance to transform an arbitrary input file. -type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) - -// filterArchive transforms the provided input archive to a new archive, -// giving the fn a chance to transform arbitrary files. 
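Editor's note on the two helpers removed just below: before the remote Build call was delegated to the bindings, the tunnel client tarred the build context itself and, when the working directory differed from the context directory, rewrote each tar header so entries stayed rooted under the context's base name. A minimal sketch of that kind of streaming header rewrite with only the standard library (illustrative names, not the removed podman helpers):

package main

import (
	"archive/tar"
	"io"
	"path/filepath"
)

// prefixTarEntries copies a tar stream from r to w, prepending prefix to every
// entry name. This is the kind of transform the removed filterArchive /
// transformArchive pair was used for; it is a sketch, not the original code.
func prefixTarEntries(r io.Reader, w io.Writer, prefix string) error {
	tr := tar.NewReader(r)
	tw := tar.NewWriter(w)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return tw.Close()
		}
		if err != nil {
			return err
		}
		hdr.Name = filepath.Join(prefix, hdr.Name)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		// Copy the current entry's body before moving to the next header.
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}
}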
-func filterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error { - tr := tar.NewReader(r) - tw := tar.NewWriter(w) - - var body io.Reader = tr - - for { - h, err := tr.Next() - if err == io.EOF { - return tw.Close() - } - if err != nil { - return err - } - - name := h.Name - data, ok, skip, err := fn(h, tr) - logrus.Debugf("Transform %q -> %q: data=%t ok=%t skip=%t err=%v", name, h.Name, data != nil, ok, skip, err) - if err != nil { - return err - } - if skip { - continue - } - if ok { - h.Size = int64(len(data)) - body = bytes.NewBuffer(data) - } - if err := tw.WriteHeader(h); err != nil { - return err - } - if _, err := io.Copy(tw, body); err != nil { - return err - } - } -} - -func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) { - var cwe error - pr, pw := io.Pipe() - go func() { - if compressed { - in, err := archive.DecompressStream(r) - if err != nil { - cwe = pw.CloseWithError(err) - return - } - r = in - } - err := filterArchive(r, pw, fn) - cwe = pw.CloseWithError(err) - }() - return pr, cwe -} diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go index 074425087..d155fdd9e 100644 --- a/pkg/domain/infra/tunnel/network.go +++ b/pkg/domain/infra/tunnel/network.go @@ -26,11 +26,16 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) { reports := make([]*entities.NetworkRmReport, 0, len(namesOrIds)) for _, name := range namesOrIds { - report, err := network.Remove(ic.ClientCxt, name, &options.Force) + response, err := network.Remove(ic.ClientCxt, name, &options.Force) if err != nil { - report[0].Err = err + report := &entities.NetworkRmReport{ + Name: name, + Err: err, + } + reports = append(reports, report) + } else { + reports = append(reports, response...) } - reports = append(reports, report...) 
} return reports, nil } diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go index ee2786330..e432d3292 100644 --- a/pkg/domain/infra/tunnel/volumes.go +++ b/pkg/domain/infra/tunnel/volumes.go @@ -56,7 +56,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin return reports, nil } -func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) { +func (ic *ContainerEngine) VolumePrune(ctx context.Context) ([]*entities.VolumePruneReport, error) { return volumes.Prune(ic.ClientCxt) } diff --git a/pkg/hooks/1.0.0/hook.go b/pkg/hooks/1.0.0/hook.go index 77fbab5aa..244e8800f 100644 --- a/pkg/hooks/1.0.0/hook.go +++ b/pkg/hooks/1.0.0/hook.go @@ -67,7 +67,14 @@ func (hook *Hook) Validate(extensionStages []string) (err error) { return errors.New("missing required property: stages") } - validStages := map[string]bool{"prestart": true, "poststart": true, "poststop": true} + validStages := map[string]bool{ + "createContainer": true, + "createRuntime": true, + "prestart": true, + "poststart": true, + "poststop": true, + "startContainer": true, + } for _, stage := range extensionStages { validStages[stage] = true } diff --git a/pkg/hooks/hooks.go b/pkg/hooks/hooks.go index 2a12eceac..6257529ab 100644 --- a/pkg/hooks/hooks.go +++ b/pkg/hooks/hooks.go @@ -120,12 +120,18 @@ func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBi extensionStageHooks[stage] = append(extensionStageHooks[stage], namedHook.hook.Hook) } else { switch stage { + case "createContainer": + config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, namedHook.hook.Hook) + case "createRuntime": + config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, namedHook.hook.Hook) case "prestart": config.Hooks.Prestart = append(config.Hooks.Prestart, namedHook.hook.Hook) case "poststart": config.Hooks.Poststart = append(config.Hooks.Poststart, namedHook.hook.Hook) case "poststop": config.Hooks.Poststop = append(config.Hooks.Poststop, namedHook.hook.Hook) + case "startContainer": + config.Hooks.StartContainer = append(config.Hooks.StartContainer, namedHook.hook.Hook) default: return extensionStageHooks, fmt.Errorf("hook %q: unknown stage %q", namedHook.name, stage) } diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go index 5dff25c7d..949c5d835 100644 --- a/pkg/registries/registries.go +++ b/pkg/registries/registries.go @@ -1,5 +1,10 @@ package registries +// TODO: this package should not exist anymore. Users should either use +// c/image's `sysregistriesv2` package directly OR, even better, we cache a +// config in libpod's image runtime so we don't need to parse the +// registries.conf files redundantly. 
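Editor's note: the TODO added above suggests calling c/image's sysregistriesv2 package directly instead of going through pkg/registries. A minimal sketch of what that looks like, assuming containers/image v5 (error handling trimmed; a nil SystemContext falls back to the default registries.conf search path):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	var sys *types.SystemContext // nil -> default registries.conf locations

	// Unqualified-search registries, e.g. ["docker.io", "quay.io"].
	search, err := sysregistriesv2.UnqualifiedSearchRegistries(sys)
	if err != nil {
		panic(err)
	}
	fmt.Println("search registries:", search)

	// Full registry entries, including mirrors and insecure/blocked flags.
	regs, err := sysregistriesv2.GetRegistries(sys)
	if err != nil {
		panic(err)
	}
	for _, r := range regs {
		fmt.Printf("%s -> %s (insecure=%v, blocked=%v)\n", r.Prefix, r.Location, r.Insecure, r.Blocked)
	}
}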
+ import ( "os" "path/filepath" diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go index bbd797817..3025825db 100644 --- a/pkg/rootless/rootless_linux.go +++ b/pkg/rootless/rootless_linux.go @@ -216,6 +216,8 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo } r, w := os.NewFile(uintptr(fds[0]), "sync host"), os.NewFile(uintptr(fds[1]), "sync child") + var pid int + defer errorhandling.CloseQuiet(r) defer errorhandling.CloseQuiet(w) defer func() { @@ -226,18 +228,19 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo if _, err := w.Write(toWrite); err != nil { logrus.Errorf("failed to write byte 0: %q", err) } + if retErr != nil && pid > 0 { + if err := unix.Kill(pid, unix.SIGKILL); err != nil { + logrus.Errorf("failed to kill %d", pid) + } + C.reexec_in_user_namespace_wait(C.int(pid), 0) + } }() pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD) - pid := int(pidC) + pid = int(pidC) if pid < 0 { return false, -1, errors.Errorf("cannot re-exec process") } - defer func() { - if retErr != nil { - C.reexec_in_user_namespace_wait(pidC, 0) - } - }() uids, gids, err := GetConfiguredMappings() if err != nil { diff --git a/pkg/spec/config_linux.go b/pkg/spec/config_linux.go index d03663f12..319cce61f 100644 --- a/pkg/spec/config_linux.go +++ b/pkg/spec/config_linux.go @@ -200,6 +200,9 @@ func getDevices(path string) ([]*configs.Device, error) { } case f.Name() == "console": continue + case f.Mode()&os.ModeSymlink != 0: + // do not add symlink'd devices to privileged devices + continue } device, err := devices.DeviceFromPath(filepath.Join(path, f.Name()), "rwm") if err != nil { diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go index 42228540c..81620997f 100644 --- a/pkg/spec/spec.go +++ b/pkg/spec/spec.go @@ -182,14 +182,23 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM g.SetProcessCwd(config.WorkDir) ProcessArgs := make([]string, 0) - if len(config.Entrypoint) > 0 { - ProcessArgs = config.Entrypoint + // We need to iterate the input for entrypoint because it is a []string + // but "" is a legit json input, which translates into a []string with an + // empty position. This messes up the eventual command being executed + // in the container + for _, a := range config.Entrypoint { + if len(a) > 0 { + ProcessArgs = append(ProcessArgs, a) + } } - if len(config.Command) > 0 { - ProcessArgs = append(ProcessArgs, config.Command...) + // Same issue as explained above for config.Entrypoint. 
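Editor's note: the loop that follows repeats for config.Command the same empty-string filtering applied to config.Entrypoint above. As a standalone illustration of why this is needed (hypothetical names): JSON input such as "Entrypoint": [""] decodes to a one-element slice whose only element is empty, which would otherwise end up as a bogus argv entry handed to the OCI generator.

package main

import (
	"encoding/json"
	"fmt"
)

// dropEmpty removes empty strings, e.g. the [""] that a bare "" entrypoint in
// the JSON input decodes to. Hypothetical helper, for illustration only.
func dropEmpty(in []string) []string {
	out := make([]string, 0, len(in))
	for _, s := range in {
		if s != "" {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	var cfg struct {
		Entrypoint []string
		Cmd        []string
	}
	_ = json.Unmarshal([]byte(`{"Entrypoint": [""], "Cmd": ["nginx", "-g", "daemon off;"]}`), &cfg)

	args := append(dropEmpty(cfg.Entrypoint), dropEmpty(cfg.Cmd)...)
	fmt.Println(args) // [nginx -g daemon off;]
}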
+ for _, a := range config.Command { + if len(a) > 0 { + ProcessArgs = append(ProcessArgs, a) + } } - g.SetProcessArgs(ProcessArgs) + g.SetProcessArgs(ProcessArgs) g.SetProcessTerminal(config.Tty) for key, val := range config.Annotations { diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go index 147ebd61b..2ee8f2441 100644 --- a/pkg/specgen/generate/container.go +++ b/pkg/specgen/generate/container.go @@ -13,6 +13,7 @@ import ( "github.com/containers/podman/v2/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -33,7 +34,43 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat _, mediaType, err := newImage.Manifest(ctx) if err != nil { - return nil, err + if errors.Cause(err) != image.ErrImageIsBareList { + return nil, err + } + // if err is not runnable image + // use the local store image with repo@digest matches with the list, if exists + manifestByte, manifestType, err := newImage.GetManifest(ctx, nil) + if err != nil { + return nil, err + } + list, err := manifest.ListFromBlob(manifestByte, manifestType) + if err != nil { + return nil, err + } + images, err := r.ImageRuntime().GetImages() + if err != nil { + return nil, err + } + findLocal := false + listDigest, err := list.ChooseInstance(r.SystemContext()) + if err != nil { + return nil, err + } + for _, img := range images { + for _, imageDigest := range img.Digests() { + if imageDigest == listDigest { + newImage = img + s.Image = img.ID() + mediaType = manifestType + findLocal = true + logrus.Debug("image contains manifest list, using image from local storage") + break + } + } + } + if !findLocal { + return nil, image.ErrImageIsBareList + } } if s.HealthConfig == nil && mediaType == manifest.DockerV2Schema2MediaType { @@ -75,8 +112,8 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat if err != nil { return nil, errors.Wrap(err, "error parsing fields in containers.conf") } - if defaultEnvs["containers"] == "" { - defaultEnvs["containers"] = "podman" + if defaultEnvs["container"] == "" { + defaultEnvs["container"] = "podman" } var envs map[string]string diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go index fda4c098c..2ac3b376f 100644 --- a/pkg/specgen/generate/container_create.go +++ b/pkg/specgen/generate/container_create.go @@ -95,7 +95,7 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener if len(names) > 0 { imgName = names[0] } - options = append(options, libpod.WithRootFSFromImage(newImage.ID(), imgName, s.Image)) + options = append(options, libpod.WithRootFSFromImage(newImage.ID(), imgName, s.RawImageName)) } if err := s.Validate(); err != nil { return nil, errors.Wrap(err, "invalid config provided") diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go index 0bd39d5a4..43caf0fe9 100644 --- a/pkg/specgen/generate/pod_create.go +++ b/pkg/specgen/generate/pod_create.go @@ -84,12 +84,24 @@ func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod if len(p.CNINetworks) > 0 { options = append(options, libpod.WithPodNetworks(p.CNINetworks)) } + + if len(p.InfraImage) > 0 { + options = append(options, libpod.WithInfraImage(p.InfraImage)) + } + + if len(p.InfraCommand) > 0 { + options = append(options, libpod.WithInfraCommand(p.InfraCommand)) + } + switch p.NetNS.NSMode { case specgen.Bridge, specgen.Default, 
"": logrus.Debugf("Pod using default network mode") case specgen.Host: logrus.Debugf("Pod will use host networking") options = append(options, libpod.WithPodHostNetwork()) + case specgen.Slirp: + logrus.Debugf("Pod will use slirp4netns") + options = append(options, libpod.WithPodSlirp4netns(p.NetworkOptions)) default: return nil, errors.Errorf("pods presently do not support network mode %s", p.NetNS.NSMode) } diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go index 87e8029a7..d17cd4a9a 100644 --- a/pkg/specgen/generate/security.go +++ b/pkg/specgen/generate/security.go @@ -7,6 +7,7 @@ import ( "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/config" "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/image" "github.com/containers/podman/v2/pkg/specgen" "github.com/containers/podman/v2/pkg/util" @@ -130,12 +131,13 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, } configSpec := g.Config + configSpec.Process.Capabilities.Ambient = []string{} configSpec.Process.Capabilities.Bounding = caplist + configSpec.Process.Capabilities.Inheritable = caplist if s.User == "" || s.User == "root" || s.User == "0" { configSpec.Process.Capabilities.Effective = caplist configSpec.Process.Capabilities.Permitted = caplist - configSpec.Process.Capabilities.Inheritable = caplist } else { userCaps, err := capabilities.NormalizeCapabilities(s.CapAdd) if err != nil { @@ -167,7 +169,52 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, } g.SetRootReadonly(s.ReadOnlyFilesystem) + + // Add default sysctls + defaultSysctls, err := util.ValidateSysctls(rtc.Sysctls()) + if err != nil { + return err + } + for sysctlKey, sysctlVal := range defaultSysctls { + + // Ignore mqueue sysctls if --ipc=host + if s.IpcNS.IsHost() && strings.HasPrefix(sysctlKey, "fs.mqueue.") { + logrus.Infof("Sysctl %s=%s ignored in containers.conf, since IPC Namespace set to host", sysctlKey, sysctlVal) + + continue + } + + // Ignore net sysctls if --net=host + if s.NetNS.IsHost() && strings.HasPrefix(sysctlKey, "net.") { + logrus.Infof("Sysctl %s=%s ignored in containers.conf, since Network Namespace set to host", sysctlKey, sysctlVal) + continue + } + + // Ignore uts sysctls if --uts=host + if s.UtsNS.IsHost() && (strings.HasPrefix(sysctlKey, "kernel.domainname") || strings.HasPrefix(sysctlKey, "kernel.hostname")) { + logrus.Infof("Sysctl %s=%s ignored in containers.conf, since UTS Namespace set to host", sysctlKey, sysctlVal) + continue + } + + g.AddLinuxSysctl(sysctlKey, sysctlVal) + } + for sysctlKey, sysctlVal := range s.Sysctl { + + if s.IpcNS.IsHost() && strings.HasPrefix(sysctlKey, "fs.mqueue.") { + return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since IPC Namespace set to host", sysctlKey, sysctlVal) + } + + // Ignore net sysctls if --net=host + if s.NetNS.IsHost() && strings.HasPrefix(sysctlKey, "net.") { + return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since Host Namespace set to host", sysctlKey, sysctlVal) + } + + // Ignore uts sysctls if --uts=host + if s.UtsNS.IsHost() && (strings.HasPrefix(sysctlKey, "kernel.domainname") || strings.HasPrefix(sysctlKey, "kernel.hostname")) { + return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since UTS Namespace set to host", sysctlKey, sysctlVal) + } + g.AddLinuxSysctl(sysctlKey, sysctlVal) } diff --git 
a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go index 7f55317ff..b225f79ee 100644 --- a/pkg/specgen/generate/storage.go +++ b/pkg/specgen/generate/storage.go @@ -195,9 +195,9 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s splitVol := strings.SplitN(volume, ":", 2) if len(splitVol) == 2 { splitOpts := strings.Split(splitVol[1], ",") + setRORW := false + setZ := false for _, opt := range splitOpts { - setRORW := false - setZ := false switch opt { case "z": if setZ { diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go index d5e0aecf2..a6c61a203 100644 --- a/pkg/specgen/pod_validate.go +++ b/pkg/specgen/pod_validate.go @@ -72,9 +72,9 @@ func (p *PodSpecGenerator) Validate() error { return exclusivePodOptions("NoInfra", "NoManageResolvConf") } } - if p.NetNS.NSMode != "" && p.NetNS.NSMode != Bridge && p.NetNS.NSMode != Default { + if p.NetNS.NSMode != "" && p.NetNS.NSMode != Bridge && p.NetNS.NSMode != Slirp && p.NetNS.NSMode != Default { if len(p.PortMappings) > 0 { - return errors.New("PortMappings can only be used with Bridge mode networking") + return errors.New("PortMappings can only be used with Bridge or slirp4netns networking") } if len(p.CNINetworks) > 0 { return errors.New("CNINetworks can only be used with Bridge mode networking") @@ -95,12 +95,5 @@ func (p *PodSpecGenerator) Validate() error { return exclusivePodOptions("NoManageHosts", "HostAdd") } - // Set Defaults - if len(p.InfraImage) < 1 { - p.InfraImage = containerConfig.Engine.InfraImage - } - if len(p.InfraCommand) < 1 { - p.InfraCommand = []string{containerConfig.Engine.InfraCommand} - } return nil } diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go index 3c32ec365..7d771f5bb 100644 --- a/pkg/specgen/podspecgen.go +++ b/pkg/specgen/podspecgen.go @@ -134,6 +134,9 @@ type PodNetworkConfig struct { // Conflicts with NoInfra=true and NoManageHosts. // Optional. HostAdd []string `json:"hostadd,omitempty"` + // NetworkOptions are additional options for each network + // Optional. + NetworkOptions map[string][]string `json:"network_options,omitempty"` } // PodCgroupConfig contains configuration options about a pod's cgroups. diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go index cca05eddb..b8f37ec7a 100644 --- a/pkg/specgen/specgen.go +++ b/pkg/specgen/specgen.go @@ -89,6 +89,9 @@ type ContainerBasicConfig struct { // If not given, a default location will be used. // Optional. ConmonPidFile string `json:"conmon_pid_file,omitempty"` + // RawImageName is the user-specified and unprocessed input referring + // to a local or a remote image. + RawImageName string `json:"raw_image_name,omitempty"` // RestartPolicy is the container's restart policy - an action which // will be taken when the container exits. // If not given, the default policy, which does nothing, will be used. 
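Editor's note: the two sysctl loops added to security.go above apply the same namespace checks twice, once to skip containers.conf defaults and once to reject user-supplied values with an error. A condensed sketch of the shared predicate, with hypothetical names (not the actual podman code):

package main

import (
	"fmt"
	"strings"
)

// sysctlConflictsWithHostNS reports whether a sysctl only makes sense inside a
// private namespace and therefore must be skipped (for defaults) or rejected
// (for user-requested sysctls) when the matching namespace is shared with the
// host: fs.mqueue.* needs a private IPC namespace, net.* a private network
// namespace, and kernel.domainname/kernel.hostname a private UTS namespace.
func sysctlConflictsWithHostNS(key string, hostIPC, hostNet, hostUTS bool) bool {
	switch {
	case hostIPC && strings.HasPrefix(key, "fs.mqueue."):
		return true
	case hostNet && strings.HasPrefix(key, "net."):
		return true
	case hostUTS && (strings.HasPrefix(key, "kernel.domainname") || strings.HasPrefix(key, "kernel.hostname")):
		return true
	}
	return false
}

func main() {
	fmt.Println(sysctlConflictsWithHostNS("net.ipv4.ping_group_range", false, true, false)) // true
	fmt.Println(sysctlConflictsWithHostNS("fs.mqueue.msg_max", false, true, false))         // false
}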
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go index a4fdae46e..8090bcd3d 100644 --- a/pkg/systemd/generate/containers.go +++ b/pkg/systemd/generate/containers.go @@ -256,7 +256,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst } if info.PodmanVersion == "" { - info.PodmanVersion = version.Version + info.PodmanVersion = version.Version.String() } if info.GenerateTimestamp { info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate)) diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go index c41eedd17..c0acba37d 100644 --- a/pkg/systemd/generate/pods.go +++ b/pkg/systemd/generate/pods.go @@ -299,7 +299,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions) info.ExecStopPost = "{{.Executable}} pod rm --ignore -f --pod-id-file {{.PodIDFile}}" } if info.PodmanVersion == "" { - info.PodmanVersion = version.Version + info.PodmanVersion = version.Version.String() } if info.GenerateTimestamp { info.TimeStamp = fmt.Sprintf("%v", time.Now().Format(time.UnixDate)) diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 82282a549..7612d3012 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -537,33 +537,21 @@ func OpenExclusiveFile(path string) (*os.File, error) { return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) } -// PullType whether to pull new image -type PullType int +type PullType = config.PullPolicy -const ( +var ( // PullImageAlways always try to pull new image when create or run - PullImageAlways PullType = iota + PullImageAlways = config.PullImageAlways // PullImageMissing pulls image if it is not locally - PullImageMissing + PullImageMissing = config.PullImageMissing // PullImageNever will never pull new image - PullImageNever + PullImageNever = config.PullImageNever ) // ValidatePullType check if the pullType from CLI is valid and returns the valid enum type // if the value from CLI is invalid returns the error func ValidatePullType(pullType string) (PullType, error) { - switch pullType { - case "always": - return PullImageAlways, nil - case "missing", "IfNotPresent": - return PullImageMissing, nil - case "never": - return PullImageNever, nil - case "": - return PullImageMissing, nil - default: - return PullImageMissing, errors.Errorf("invalid pull type %q", pullType) - } + return config.ValidatePullPolicy(pullType) } // ExitCode reads the error message when failing to executing container process diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go index 1dcfa2213..4bcf70b0d 100644 --- a/pkg/varlinkapi/images.go +++ b/pkg/varlinkapi/images.go @@ -23,7 +23,7 @@ import ( "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/image" - "github.com/containers/podman/v2/pkg/channelwriter" + "github.com/containers/podman/v2/pkg/channel" "github.com/containers/podman/v2/pkg/util" iopodman "github.com/containers/podman/v2/pkg/varlink" "github.com/containers/podman/v2/utils" @@ -570,7 +570,7 @@ func (i *VarlinkAPI) Commit(call iopodman.VarlinkCall, name, imageName string, c log []string mimeType string ) - output := channelwriter.NewChannelWriter() + output := channel.NewWriter(make(chan []byte)) channelClose := func() { if err := output.Close(); err != nil { logrus.Errorf("failed to close channel writer: %q", err) @@ -704,7 +704,7 @@ func (i *VarlinkAPI) PullImage(call iopodman.VarlinkCall, name string, creds iop if call.WantsMore() { call.Continues = true } - 
output := channelwriter.NewChannelWriter() + output := channel.NewWriter(make(chan []byte)) channelClose := func() { if err := output.Close(); err != nil { logrus.Errorf("failed to close channel writer: %q", err) diff --git a/pkg/varlinkapi/system.go b/pkg/varlinkapi/system.go index 9e4db2611..e5c766a6d 100644 --- a/pkg/varlinkapi/system.go +++ b/pkg/varlinkapi/system.go @@ -7,6 +7,7 @@ import ( "fmt" "os" goruntime "runtime" + "strconv" "time" "github.com/containers/image/v5/pkg/sysregistriesv2" @@ -22,13 +23,18 @@ func (i *VarlinkAPI) GetVersion(call iopodman.VarlinkCall) error { return err } + int64APIVersion, err := strconv.ParseInt(versionInfo.APIVersion, 10, 64) + if err != nil { + return err + } + return call.ReplyGetVersion( versionInfo.Version, versionInfo.GoVersion, versionInfo.GitCommit, time.Unix(versionInfo.Built, 0).Format(time.RFC3339), versionInfo.OsArch, - versionInfo.APIVersion, + int64APIVersion, ) } diff --git a/pkg/varlinkapi/util.go b/pkg/varlinkapi/util.go index 748d8f9cc..7f96965f0 100644 --- a/pkg/varlinkapi/util.go +++ b/pkg/varlinkapi/util.go @@ -11,7 +11,7 @@ import ( "github.com/containers/buildah" "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" - "github.com/containers/podman/v2/pkg/channelwriter" + "github.com/containers/podman/v2/pkg/channel" iopodman "github.com/containers/podman/v2/pkg/varlink" "github.com/containers/storage/pkg/archive" ) @@ -201,7 +201,7 @@ func makePsOpts(inOpts iopodman.PsOpts) PsOptions { // more. it is capable of sending updates as the output writer gets them or append them // all to a log. the chan error is the error from the libpod call so we can honor // and error event in that case. -func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwriter.Writer, reply func(br iopodman.MoreResponse) error) ([]string, error) { +func forwardOutput(log []string, c chan error, wantsMore bool, output channel.WriteCloser, reply func(br iopodman.MoreResponse) error) ([]string, error) { done := false for { select { @@ -214,7 +214,7 @@ func forwardOutput(log []string, c chan error, wantsMore bool, output *channelwr done = true // if no error is found, we pull what we can from the log writer and // append it to log string slice - case line := <-output.ByteChannel: + case line := <-output.Chan(): log = append(log, string(line)) // If the end point is being used in more mode, send what we have if wantsMore { |
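Editor's note: the channelwriter -> channel rename above swaps the concrete *Writer for a WriteCloser whose Chan() feeds forwardOutput. A minimal sketch of that pattern, an io.WriteCloser backed by a chan []byte so a consumer can stream progress output as it is produced (illustrative only, not the actual pkg/channel implementation):

package main

import (
	"fmt"
	"io"
)

// chanWriter forwards every Write as one message on a channel, letting a
// reader goroutine stream output chunk by chunk until Close closes the channel.
type chanWriter struct {
	ch chan []byte
}

func newChanWriter() *chanWriter { return &chanWriter{ch: make(chan []byte)} }

func (w *chanWriter) Chan() <-chan []byte { return w.ch }

func (w *chanWriter) Write(p []byte) (int, error) {
	buf := make([]byte, len(p)) // copy: the caller may reuse p after Write returns
	copy(buf, p)
	w.ch <- buf
	return len(p), nil
}

func (w *chanWriter) Close() error {
	close(w.ch)
	return nil
}

var _ io.WriteCloser = (*chanWriter)(nil)

func main() {
	w := newChanWriter()
	go func() {
		fmt.Fprintln(w, "pulling image...")
		fmt.Fprintln(w, "done")
		w.Close()
	}()
	// Consumer side: drain the channel until the writer is closed.
	for line := range w.Chan() {
		fmt.Printf("forwarded: %s", line)
	}
}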