42 files changed, 2906 insertions, 204 deletions
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 78e13c227..c9c676ccb 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,5 +1,22 @@ # Release Notes +## 1.8.2 + +### Bugfixes +- Fixed a bug where unit files generated by `podman generate systemd --new` would not force containers to detach, causing the unit to time out when trying to start +- Fixed a bug where `podman system reset` could delete important system directories if run as rootless on installations created by older versions of Podman ([#4831](https://github.com/containers/libpod/issues/4831)) +- Fixed a bug where images built by `podman build` would not properly set the OS and Architecture they were built with ([#5503](https://github.com/containers/libpod/issues/5503)) +- Fixed a bug where attached `podman run` with `--sig-proxy` enabled (the default), when built with Go 1.14, would repeatedly send signal 23 to the process in the container and could generate errors when the container stopped ([#5483](https://github.com/containers/libpod/issues/5483)) +- Fixed a bug where rootless `podman run` commands could hang when forwarding ports + +### HTTP API +- Initial support for Libpod endpoints related to creating and operating on image manifest lists has been added +- The Libpod Healthcheck and Events API endpoints are now supported + +### Misc +- Updated vendored containers/storage to v1.16.5 +- Several performance improvements have been made to creating containers, which should somewhat improve the performance of `podman create` and `podman run` + ## 1.8.1 ### Features - Many networking-related flags have been added to `podman pod create` to enable customization of pod networks, including `--add-host`, `--dns`, `--dns-opt`, `--dns-search`, `--ip`, `--mac-address`, `--network`, and `--no-hosts` diff --git a/changelog.txt b/changelog.txt index 651ef89b8..e1f65fe9b 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,33 @@ +- Changelog for v1.8.2-rc1 (2020-03-17) + * Update release notes for v1.8.2-rc1 + * Fix vendoring on master + * Update containers/storage to v1.16.5 + * config: make warning clearer + * Four small CI fixes: + * fix systemd generate tests + * apiv2 addition of manifests + * add os|arch attributes when building + * Missing double quotes in troubleshooting guide. + * force run container detached if container CreateCommand missing the detach param + * Bump github.com/containers/common from 0.4.2 to 0.5.0 + * Bump k8s.io/api from 0.17.3 to 0.17.4 + * Bump github.com/fsnotify/fsnotify from 1.4.7 to 1.4.9 + * eat signal 23 in signal proxy + * add apiv2 healthcheck code + * turn off color-mode for bindings + * remove imagefilter for varlink remote client + * Bump github.com/containers/storage from 1.16.2 to 1.16.3 + * run --rmi test: make it work + * rootlessport: detect rootless-child exit + * create: do not calculate image size + * Follow up changes from #5244 + * man page cross-reference fixes: part 2 + * Update version in README to v1.8.1 + * [CI:DOCS]Add libpod event endpoint + * Bump to v1.8.2-dev + * Update start stop api to use pod status function.
+ * Fix bug podman reset to not remove $XDG_RUNTIME_DIR + - Changelog for v1.8.1 (2020-03-11) * man pages: fix inconsistencies * Update release notes for v1.8.1 final release diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh index 5001ef4dd..9b4a56acd 100755 --- a/contrib/cirrus/setup_environment.sh +++ b/contrib/cirrus/setup_environment.sh @@ -47,6 +47,15 @@ case "${OS_RELEASE_ID}" in fi ;; fedora) + # This is temporary and should be removed once conmon is in stable + # and the images can be rebuilt properly. + if [[ "$OS_RELEASE_VER" -eq "30" ]]; then + dnf -y install https://kojipkgs.fedoraproject.org//packages/conmon/2.0.13/1.fc30/x86_64/conmon-2.0.13-1.fc30.x86_64.rpm + else + dnf -y install https://kojipkgs.fedoraproject.org//packages/conmon/2.0.13/1.fc31/x86_64/conmon-2.0.13-1.fc31.x86_64.rpm + fi + # End of temporary patch + # All SELinux distros need this for systemd-in-a-container setsebool container_manage_cgroup true if [[ "$ADD_SECOND_PARTITION" == "true" ]]; then @@ -14,7 +14,7 @@ require ( github.com/containers/conmon v2.0.10+incompatible github.com/containers/image/v5 v5.2.1 github.com/containers/psgo v1.4.0 - github.com/containers/storage v1.16.3 + github.com/containers/storage v1.16.5 github.com/coreos/go-systemd/v22 v22.0.0 github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b github.com/cyphar/filepath-securejoin v0.2.2 @@ -94,6 +94,8 @@ github.com/containers/storage v1.16.2 h1:S77Y+lmJcnGoPEZB2OOrTrRGyjT8viDCGyhVNNz github.com/containers/storage v1.16.2/go.mod h1:/RNmsK01ajCL+VtMSi3W8kHzpBwN+Q5gLYWgfw5wlMg= github.com/containers/storage v1.16.3 h1:bctiz1I+0TIivtXbrVmy02ZYlOA+IjKIJMzAMTBifj8= github.com/containers/storage v1.16.3/go.mod h1:dNTv0+BaebIAOGgH34dPtwGPR+Km2fObcfOlFxYFwA0= +github.com/containers/storage v1.16.5 h1:eHeWEhUEWX3VMIG1Vn1rEjfRoLHUQev3cwtA5zd89wk= +github.com/containers/storage v1.16.5/go.mod h1:SdysZeLKJOvfHYysUWg9OZUC3gdZWi5b2b7NC18VpPE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38= @@ -275,9 +277,13 @@ github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/If github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.2 h1:8d4I0LDiieuGngsqlqOih9ker/NS0LX4V0i+EhiFWg0= +github.com/klauspost/pgzip v1.2.2/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff 
--git a/libpod/config/config.go b/libpod/config/config.go index 5d59f1bf2..b94c1b20b 100644 --- a/libpod/config/config.go +++ b/libpod/config/config.go @@ -554,7 +554,7 @@ func (c *Config) checkCgroupsAndLogger() { } if !hasSession { - logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available") + logrus.Warningf("The cgroup manager or the events logger is set to use systemd but there is no systemd user session available") logrus.Warningf("To use systemd, you may need to log in using a user session") logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID()) logrus.Warningf("Falling back to --cgroup-manager=cgroupfs and --events-backend=file") diff --git a/libpod/image/manifests.go b/libpod/image/manifests.go new file mode 100644 index 000000000..9dbeb4cc5 --- /dev/null +++ b/libpod/image/manifests.go @@ -0,0 +1,154 @@ +package image + +import ( + "context" + + "github.com/containers/buildah/manifests" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ManifestAddOpts holds options for adding an image to a manifest list +// swagger:model ManifestAddOpts +type ManifestAddOpts struct { + All bool `json:"all"` + Annotation map[string]string `json:"annotation"` + Arch string `json:"arch"` + Features []string `json:"features"` + Images []string `json:"images"` + OSVersion string `json:"os_version"` + Variant string `json:"variant"` +} + +// InspectManifest returns a Docker-format version of the manifest list +func (i *Image) InspectManifest() (*manifest.Schema2List, error) { + list, err := i.getManifestList() + if err != nil { + return nil, err + } + return list.Docker(), nil +} + +// RemoveManifest removes the given digest from the manifest list.
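+// On success, the returned string is the ID of the image that now holds the updated list. +// A minimal usage sketch, not part of this patch (img and rawDigestString are assumed to exist): +// +// d, err := digest.Parse(rawDigestString) +// if err != nil { +// return "", err +// } +// newID, err := img.RemoveManifest(d)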
+func (i *Image) RemoveManifest(d digest.Digest) (string, error) { + list, err := i.getManifestList() + if err != nil { + return "", err + } + if err := list.Remove(d); err != nil { + return "", err + } + return list.SaveToImage(i.imageruntime.store, i.ID(), nil, "") +} + +// getManifestList is a helper to obtain a manifest list +func (i *Image) getManifestList() (manifests.List, error) { + _, list, err := manifests.LoadFromImage(i.imageruntime.store, i.ID()) + return list, err +} + +// CreateManifestList creates a new manifest list and can optionally add given images +// to the list +func CreateManifestList(rt *Runtime, systemContext types.SystemContext, names []string, imgs []string, all bool) (string, error) { + list := manifests.Create() + opts := ManifestAddOpts{Images: names, All: all} + for _, img := range imgs { + var ref types.ImageReference + newImage, err := rt.NewFromLocal(img) + if err == nil { + ir, err := newImage.toImageRef(context.Background()) + if err != nil { + return "", err + } + if ir == nil { + return "", errors.New("unable to convert image to ImageReference") + } + ref = ir.Reference() + } else { + ref, err = alltransports.ParseImageName(img) + if err != nil { + return "", err + } + } + list, err = addManifestToList(ref, list, systemContext, opts) + if err != nil { + return "", err + } + } + return list.SaveToImage(rt.store, "", names, manifest.DockerV2ListMediaType) +} + +func addManifestToList(ref types.ImageReference, list manifests.List, systemContext types.SystemContext, opts ManifestAddOpts) (manifests.List, error) { + d, err := list.Add(context.Background(), &systemContext, ref, opts.All) + if err != nil { + return nil, err + } + if len(opts.OSVersion) > 0 { + if err := list.SetOSVersion(d, opts.OSVersion); err != nil { + return nil, err + } + } + if len(opts.Features) > 0 { + if err := list.SetFeatures(d, opts.Features); err != nil { + return nil, err + } + } + if len(opts.Arch) > 0 { + if err := list.SetArchitecture(d, opts.Arch); err != nil { + return nil, err + } + } + if len(opts.Variant) > 0 { + if err := list.SetVariant(d, opts.Variant); err != nil { + return nil, err + } + } + if len(opts.Annotation) > 0 { + if err := list.SetAnnotations(&d, opts.Annotation); err != nil { + return nil, err + } + } + return list, err +} + +// AddManifest adds a manifest to a given manifest list. 
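+// The image to add is taken from opts.Images[0], resolved first as a local image and then +// as a transport-qualified name. A minimal usage sketch, not part of this patch (the image +// name and sysCtx are assumptions): +// +// opts := ManifestAddOpts{Images: []string{"docker.io/library/alpine:latest"}, Arch: "amd64"} +// newID, err := img.AddManifest(sysCtx, opts)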
+func (i *Image) AddManifest(systemContext types.SystemContext, opts ManifestAddOpts) (string, error) { + var ( + ref types.ImageReference + ) + if len(opts.Images) < 1 { + return "", errors.New("manifest add requires at least one image name") + } + newImage, err := i.imageruntime.NewFromLocal(opts.Images[0]) + if err == nil { + ir, err := newImage.toImageRef(context.Background()) + if err != nil { + return "", err + } + if ir == nil { + return "", errors.New("unable to convert image to ImageReference") + } + ref = ir.Reference() + } else { + ref, err = alltransports.ParseImageName(opts.Images[0]) + if err != nil { + return "", err + } + } + list, err := i.getManifestList() + if err != nil { + return "", err + } + list, err = addManifestToList(ref, list, systemContext, opts) + if err != nil { + return "", err + } + return list.SaveToImage(i.imageruntime.store, i.ID(), nil, "") +} + +// PushManifest pushes a manifest to a destination +func (i *Image) PushManifest(dest types.ImageReference, opts manifests.PushOptions) (digest.Digest, error) { + list, err := i.getManifestList() + if err != nil { + return "", err + } + _, d, err := list.Push(context.Background(), dest, opts) + return d, err +} diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index a5530e448..d3c3bbcc5 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -774,6 +774,10 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options }() attachToExecCalled = true + if err := execCmd.Wait(); err != nil { + return -1, nil, errors.Wrapf(err, "cannot run conmon") + } + pid, err := readConmonPipeData(parentSyncPipe, ociLog) return pid, attachChan, err diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go new file mode 100644 index 000000000..a3d2caba6 --- /dev/null +++ b/pkg/api/handlers/libpod/manifests.go @@ -0,0 +1,166 @@ +package libpod + +import ( + "encoding/json" + "net/http" + + "github.com/containers/buildah/manifests" + copy2 "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/api/handlers" + "github.com/containers/libpod/pkg/api/handlers/utils" + "github.com/gorilla/schema" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func ManifestCreate(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Name []string `schema:"name"` + Image []string `schema:"image"` + All bool `schema:"all"` + }{ + // Add defaults here once needed.
+ } + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + rtc, err := runtime.GetConfig() + if err != nil { + utils.InternalServerError(w, err) + return + } + sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false) + manID, err := image.CreateManifestList(runtime.ImageRuntime(), *sc, query.Name, query.Image, query.All) + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, handlers.IDResponse{ID: manID}) +} + +func ManifestInspect(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + name := utils.GetName(r) + newImage, err := runtime.ImageRuntime().NewFromLocal(name) + if err != nil { + utils.ImageNotFound(w, name, err) + return + } + data, err := newImage.InspectManifest() + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, data) +} + +func ManifestAdd(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + var manifestInput image.ManifestAddOpts + if err := json.NewDecoder(r.Body).Decode(&manifestInput); err != nil { + utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()")) + return + } + name := utils.GetName(r) + newImage, err := runtime.ImageRuntime().NewFromLocal(name) + if err != nil { + utils.ImageNotFound(w, name, err) + return + } + rtc, err := runtime.GetConfig() + if err != nil { + utils.InternalServerError(w, err) + return + } + sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false) + newID, err := newImage.AddManifest(*sc, manifestInput) + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, handlers.IDResponse{ID: newID}) +} + +func ManifestRemove(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Digest string `schema:"digest"` + }{ + // Add defaults here once needed. + } + name := utils.GetName(r) + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + newImage, err := runtime.ImageRuntime().NewFromLocal(name) + if err != nil { + utils.ImageNotFound(w, name, err) + return + } + d, err := digest.Parse(query.Digest) + if err != nil { + utils.Error(w, "invalid digest", http.StatusBadRequest, err) + return + } + newID, err := newImage.RemoveManifest(d) + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, handlers.IDResponse{ID: newID}) +} +func ManifestPush(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + All bool `schema:"all"` + Destination string `schema:"destination"` + }{ + // Add defaults here once needed. 
+ } + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + name := utils.GetName(r) + newImage, err := runtime.ImageRuntime().NewFromLocal(name) + if err != nil { + utils.ImageNotFound(w, name, err) + return + } + dest, err := alltransports.ParseImageName(query.Destination) + if err != nil { + utils.Error(w, "invalid destination parameter", http.StatusBadRequest, errors.Errorf("invalid destination parameter %q", query.Destination)) + return + } + rtc, err := runtime.GetConfig() + if err != nil { + utils.InternalServerError(w, err) + return + } + sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false) + opts := manifests.PushOptions{ + ImageListSelection: copy2.CopySpecificImages, + SystemContext: sc, + } + if query.All { + opts.ImageListSelection = copy2.CopyAllImages + } + newD, err := newImage.PushManifest(dest, opts) + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, newD.String()) +} diff --git a/pkg/api/handlers/libpod/swagger.go b/pkg/api/handlers/libpod/swagger.go index aec30ef56..f6a26134b 100644 --- a/pkg/api/handlers/libpod/swagger.go +++ b/pkg/api/handlers/libpod/swagger.go @@ -1,8 +1,17 @@ package libpod +import "github.com/containers/image/v5/manifest" + // List Containers // swagger:response ListContainers type swagInspectPodResponse struct { // in:body Body []ListContainer } + +// Inspect Manifest +// swagger:response InspectManifest +type swagInspectManifestResponse struct { + // in:body + Body manifest.List +} diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index 2e429dc58..ce4a9957b 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -140,7 +140,9 @@ type VolumeCreateConfig struct { Opts map[string]string `schema:"opts"` } +// swagger:model IDResponse type IDResponse struct { + // ID ID string `json:"id"` } diff --git a/pkg/api/server/register_manifest.go b/pkg/api/server/register_manifest.go new file mode 100644 index 000000000..ccfc2192d --- /dev/null +++ b/pkg/api/server/register_manifest.go @@ -0,0 +1,145 @@ +package server + +import ( + "net/http" + + "github.com/containers/libpod/pkg/api/handlers/libpod" + "github.com/gorilla/mux" +) + +func (s *APIServer) registerManifestHandlers(r *mux.Router) error { + // swagger:operation POST /libpod/manifests/create manifests Create + // --- + // summary: Create + // description: Create a manifest list + // produces: + // - application/json + // parameters: + // - in: query + // name: name + // type: string + // description: manifest list name + // required: true + // - in: query + // name: image + // type: string + // description: name of the image + // - in: query + // name: all + // type: boolean + // description: add all contents if given list + // responses: + // 200: + // $ref: "#/definitions/IDResponse" + // 400: + // $ref: "#/responses/BadParamError" + // 404: + // $ref: "#/responses/NoSuchImage" + // 500: + // $ref: "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/manifests/create"), s.APIHandler(libpod.ManifestCreate)).Methods(http.MethodPost) + // swagger:operation GET /libpod/manifests/{name}/json manifests Inspect + // --- + // summary: Inspect + // description: Display a manifest list + // produces: + // - application/json + // parameters: + // - in: path + // name: name + // type: string + // required: true + // 
description: the name or ID of the manifest + // responses: + // 200: + // $ref: "#/responses/InspectManifest" + // 404: + // $ref: "#/responses/NoSuchManifest" + // 500: + // $ref: "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/manifests/{name:.*}/json"), s.APIHandler(libpod.ManifestInspect)).Methods(http.MethodGet) + // swagger:operation POST /libpod/manifests/{name}/add manifests AddManifest + // --- + // description: Add an image to a manifest list + // produces: + // - application/json + // parameters: + // - in: path + // name: name + // type: string + // required: true + // description: the name or ID of the manifest + // - in: body + // name: options + // description: options for creating a manifest + // schema: + // $ref: "#/definitions/ManifestAddOpts" + // responses: + // 200: + // $ref: "#/definitions/IDResponse" + // 404: + // $ref: "#/responses/NoSuchManifest" + // 409: + // $ref: "#/responses/BadParamError" + // 500: + // $ref: "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/manifests/{name:.*}/add"), s.APIHandler(libpod.ManifestAdd)).Methods(http.MethodPost) + // swagger:operation DELETE /libpod/manifests/{name} manifests RemoveManifest + // --- + // summary: Remove + // description: Remove an image from a manifest list + // produces: + // - application/json + // parameters: + // - in: path + // name: name + // type: string + // required: true + // description: the image associated with the manifest + // - in: query + // name: digest + // type: string + // description: image digest to be removed + // responses: + // 200: + // $ref: "#/definitions/IDResponse" + // 400: + // $ref: "#/responses/BadParamError" + // 404: + // $ref: "#/responses/NoSuchManifest" + // 500: + // $ref: "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/manifests/{name:.*}"), s.APIHandler(libpod.ManifestRemove)).Methods(http.MethodDelete) + // swagger:operation POST /libpod/manifests/{name}/push manifests PushManifest + // --- + // summary: Push + // description: Push a manifest list or image index to a registry + // produces: + // - application/json + // parameters: + // - in: path + // name: name + // type: string + // required: true + // description: the name or ID of the manifest + // - in: query + // name: destination + // type: string + // required: true + // description: the destination for the manifest + // - in: query + // name: all + // description: push all images + // type: boolean + // responses: + // 200: + // $ref: "#/definitions/IDResponse" + // 400: + // $ref: "#/responses/BadParamError" + // 404: + // $ref: "#/responses/NoSuchManifest" + // 500: + // $ref: "#/responses/InternalError" + r.Handle(VersionedPath("/libpod/manifests/{name}/push"), s.APIHandler(libpod.ManifestPush)).Methods(http.MethodPost) + return nil +} diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go index a0addb303..8496cd11c 100644 --- a/pkg/api/server/server.go +++ b/pkg/api/server/server.go @@ -99,11 +99,12 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li server.registerAuthHandlers, server.registerContainersHandlers, server.registerDistributionHandlers, - server.registerExecHandlers, server.registerEventsHandlers, + server.registerExecHandlers, server.registerHealthCheckHandlers, server.registerImagesHandlers, server.registerInfoHandlers, + server.registerManifestHandlers, server.registerMonitorHandlers, server.registerPingHandlers, server.registerPluginsHandlers, diff --git a/pkg/api/server/swagger.go 
b/pkg/api/server/swagger.go index e3c991d6d..d2cf7503e 100644 --- a/pkg/api/server/swagger.go +++ b/pkg/api/server/swagger.go @@ -51,6 +51,15 @@ type swagErrNoSuchPod struct { } } +// No such manifest +// swagger:response NoSuchManifest +type swagErrNoSuchManifest struct { + // in:body + Body struct { + utils.ErrorModel + } +} + // Internal server error // swagger:response InternalError type swagInternalError struct { diff --git a/pkg/api/tags.yaml b/pkg/api/tags.yaml index 571f49e44..5b5d9f5bb 100644 --- a/pkg/api/tags.yaml +++ b/pkg/api/tags.yaml @@ -6,6 +6,8 @@ tags: - name: images description: Actions related to images - name: pods description: Actions related to pods + - name: manifests + description: Actions related to manifests - name: volumes description: Actions related to volumes diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go new file mode 100644 index 000000000..a8d1e6ca3 --- /dev/null +++ b/pkg/bindings/manifests/manifests.go @@ -0,0 +1,126 @@ +package manifests + +import ( + "context" + "errors" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/containers/image/v5/manifest" + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/api/handlers" + "github.com/containers/libpod/pkg/bindings" + jsoniter "github.com/json-iterator/go" +) + +// Create creates a manifest list for the given names. Optional images to be associated with +// the new manifest list can also be specified. The all boolean specifies whether to add all +// entries of a given image if that image is itself a manifest list. The ID of the new +// manifest list is returned as a string. +func Create(ctx context.Context, names, images []string, all *bool) (string, error) { + var idr handlers.IDResponse + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + if len(names) < 1 { + return "", errors.New("creating a manifest requires at least one name argument") + } + params := url.Values{} + if all != nil { + params.Set("all", strconv.FormatBool(*all)) + } + for _, name := range names { + params.Add("name", name) + } + for _, i := range images { + params.Add("image", i) + } + + response, err := conn.DoRequest(nil, http.MethodPost, "/manifests/create", params) + if err != nil { + return "", err + } + return idr.ID, response.Process(&idr) +} + +// Inspect returns a manifest list for a given name. +func Inspect(ctx context.Context, name string) (*manifest.Schema2List, error) { + var list manifest.Schema2List + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(nil, http.MethodGet, "/manifests/%s/json", nil, name) + if err != nil { + return nil, err + } + return &list, response.Process(&list) +} + +// Add adds a manifest to a given manifest list. Additional options for the manifest +// can also be specified. The ID of the new manifest list is returned as a string. +func Add(ctx context.Context, name string, options image.ManifestAddOpts) (string, error) { + var idr handlers.IDResponse + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + optionsString, err := jsoniter.MarshalToString(options) + if err != nil { + return "", err + } + stringReader := strings.NewReader(optionsString) + response, err := conn.DoRequest(stringReader, http.MethodPost, "/manifests/%s/add", nil, name) + if err != nil { + return "", err + } + return idr.ID, response.Process(&idr) +} + +// Remove deletes a manifest entry from a manifest list.
Both name and the digest to be +// removed are mandatory inputs. The ID of the new manifest list is returned as a string. +func Remove(ctx context.Context, name, digest string) (string, error) { + var idr handlers.IDResponse + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + params := url.Values{} + params.Set("digest", digest) + response, err := conn.DoRequest(nil, http.MethodDelete, "/manifests/%s", params, name) + if err != nil { + return "", err + } + return idr.ID, response.Process(&idr) +} + +// Push takes a manifest list and pushes to a destination. If the destination is not specified, +// the name will be used instead. If the optional all boolean is specified, all images specified +// in the list will be pushed as well. +func Push(ctx context.Context, name string, destination *string, all *bool) (string, error) { + var ( + idr handlers.IDResponse + ) + dest := name + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + params := url.Values{} + params.Set("image", name) + if destination != nil { + dest = *destination + } + params.Set("destination", dest) + if all != nil { + params.Set("all", strconv.FormatBool(*all)) + } + response, err := conn.DoRequest(nil, http.MethodPost, "/manifests/%s/push", params, name) + if err != nil { + return "", err + } + return idr.ID, response.Process(&idr) +} diff --git a/pkg/bindings/test/manifests_test.go b/pkg/bindings/test/manifests_test.go new file mode 100644 index 000000000..23c3d8194 --- /dev/null +++ b/pkg/bindings/test/manifests_test.go @@ -0,0 +1,124 @@ +package test_bindings + +import ( + "net/http" + "time" + + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/bindings" + "github.com/containers/libpod/pkg/bindings/images" + "github.com/containers/libpod/pkg/bindings/manifests" + . "github.com/onsi/ginkgo" + .
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var _ = Describe("Podman containers ", func() { + var ( + bt *bindingTest + s *gexec.Session + ) + + BeforeEach(func() { + bt = newBindingTest() + bt.RestoreImagesFromCache() + s = bt.startAPIService() + time.Sleep(1 * time.Second) + err := bt.NewConnection() + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + s.Kill() + bt.cleanup() + }) + + It("create manifest", func() { + // create manifest list without images + id, err := manifests.Create(bt.conn, []string{"quay.io/libpod/foobar:latest"}, []string{}, nil) + Expect(err).To(BeNil()) + list, err := manifests.Inspect(bt.conn, id) + Expect(err).To(BeNil()) + Expect(len(list.Manifests)).To(BeZero()) + + // creating a duplicate should fail as a 500 + _, err = manifests.Create(bt.conn, []string{"quay.io/libpod/foobar:latest"}, []string{}, nil) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + + _, err = images.Remove(bt.conn, id, nil) + Expect(err).To(BeNil()) + + // create manifest list with images + id, err = manifests.Create(bt.conn, []string{"quay.io/libpod/foobar:latest"}, []string{alpine.name}, nil) + Expect(err).To(BeNil()) + list, err = manifests.Inspect(bt.conn, id) + Expect(err).To(BeNil()) + Expect(len(list.Manifests)).To(BeNumerically("==", 1)) + }) + + It("inspect bogus manifest", func() { + _, err := manifests.Inspect(bt.conn, "larry") + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + }) + + It("add manifest", func() { + // add to bogus should 404 + _, err := manifests.Add(bt.conn, "foobar", image.ManifestAddOpts{}) + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + + id, err := manifests.Create(bt.conn, []string{"quay.io/libpod/foobar:latest"}, []string{}, nil) + Expect(err).To(BeNil()) + opts := image.ManifestAddOpts{Images: []string{alpine.name}} + _, err = manifests.Add(bt.conn, id, opts) + Expect(err).To(BeNil()) + list, err := manifests.Inspect(bt.conn, id) + Expect(err).To(BeNil()) + Expect(len(list.Manifests)).To(BeNumerically("==", 1)) + + // add bogus name to existing list should fail + opts.Images = []string{"larry"} + _, err = manifests.Add(bt.conn, id, opts) + Expect(err).ToNot(BeNil()) + code, _ = bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) + }) + + It("remove manifest", func() { + // removal on bogus manifest list should be 404 + _, err := manifests.Remove(bt.conn, "larry", "1234") + Expect(err).ToNot(BeNil()) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + + id, err := manifests.Create(bt.conn, []string{"quay.io/libpod/foobar:latest"}, []string{alpine.name}, nil) + Expect(err).To(BeNil()) + data, err := manifests.Inspect(bt.conn, id) + Expect(err).To(BeNil()) + Expect(len(data.Manifests)).To(BeNumerically("==", 1)) + + // removal on a good manifest list with a bad digest should be 400 + _, err = manifests.Remove(bt.conn, id, "!234") + Expect(err).ToNot(BeNil()) + code, _ = bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusBadRequest)) + + digest := data.Manifests[0].Digest.String() + _, err = manifests.Remove(bt.conn, id, digest) + Expect(err).To(BeNil()) + + // removal on good manifest with good digest should work + data, err = 
manifests.Inspect(bt.conn, id) + Expect(err).To(BeNil()) + Expect(len(data.Manifests)).To(BeZero()) + }) + + It("push manifest", func() { + Skip("TODO") + }) +}) diff --git a/vendor/github.com/containers/buildah/manifests/copy.go b/vendor/github.com/containers/buildah/manifests/copy.go new file mode 100644 index 000000000..7e651a46c --- /dev/null +++ b/vendor/github.com/containers/buildah/manifests/copy.go @@ -0,0 +1,15 @@ +package manifests + +import ( + "github.com/containers/image/v5/signature" +) + +var ( + // storageAllowedPolicyScopes overrides the policy for local storage + // to ensure that we can read images from it. + storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, + } +) diff --git a/vendor/github.com/containers/buildah/manifests/manifests.go b/vendor/github.com/containers/buildah/manifests/manifests.go new file mode 100644 index 000000000..0fe7e477b --- /dev/null +++ b/vendor/github.com/containers/buildah/manifests/manifests.go @@ -0,0 +1,397 @@ +package manifests + +import ( + "context" + "encoding/json" + stderrors "errors" + "io" + + "github.com/containers/buildah/pkg/manifests" + "github.com/containers/buildah/pkg/supplemented" + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + is "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const instancesData = "instances.json" + +// ErrListImageUnknown is returned when we attempt to create an image reference +// for a List that has not yet been saved to an image. +var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") + +type list struct { + manifests.List + instances map[digest.Digest]string +} + +// List is a manifest list or image index, either created using Create(), or +// loaded from local storage using LoadFromImage(). +type List interface { + manifests.List + SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error) + Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) + Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) + Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) +} + +// PushOptions includes various settings which are needed for pushing the +// manifest list and its instances. 
+type PushOptions struct { + Store storage.Store + SystemContext *types.SystemContext // github.com/containers/image/types.SystemContext + ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages + Instances []digest.Digest // instances to copy if ImageListSelection == CopySpecificImages + ReportWriter io.Writer // will be used to log the writing of the list and any blobs + SignBy string // fingerprint of GPG key to use to sign images + RemoveSignatures bool // true to discard signatures in images + ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 +} + +// Create creates a new, empty manifest list. +func Create() List { + return &list{ + List: manifests.Create(), + instances: make(map[digest.Digest]string), + } +} + +// LoadFromImage reads the manifest list or image index, and additional +// information about where the various instances that it contains live, from an +// image record with the specified ID in local storage. +func LoadFromImage(store storage.Store, image string) (string, List, error) { + img, err := store.Image(image) + if err != nil { + return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image) + } + manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix) + if err != nil { + return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image) + } + manifestList, err := manifests.FromBlob(manifestBytes) + if err != nil { + return "", nil, err + } + list := &list{ + List: manifestList, + instances: make(map[digest.Digest]string), + } + instancesBytes, err := store.ImageBigData(img.ID, instancesData) + if err != nil { + return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image) + } + if err := json.Unmarshal(instancesBytes, &list.instances); err != nil { + return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image) + } + list.instances[""] = img.ID + return img.ID, list, err +} + +// SaveToImage saves the manifest list or image index as the manifest of an +// Image record with the specified names in local storage, generating a random +// image ID if none is specified. It also stores information about where the +// images whose manifests are included in the list can be found.
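+// Passing an empty imageID creates a new image record to hold the list; passing the ID of an +// existing image updates that image in place. A hedged sketch, not part of this patch (store +// and list are assumed to exist): +// +// id, err := list.SaveToImage(store, "", []string{"localhost/mylist:latest"}, manifest.DockerV2ListMediaType)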
+func (l *list) SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error) { + manifestBytes, err := l.List.Serialize(mimeType) + if err != nil { + return "", err + } + instancesBytes, err := json.Marshal(&l.instances) + if err != nil { + return "", err + } + img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{}) + if err == nil || errors.Cause(err) == storage.ErrDuplicateID { + created := (err == nil) + if created { + imageID = img.ID + l.instances[""] = img.ID + } + err := store.SetImageBigData(imageID, storage.ImageDigestManifestBigDataNamePrefix, manifestBytes, manifest.Digest) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("error deleting image %q after failing to save manifest for it", img.ID) + } + } + return "", errors.Wrapf(err, "error saving manifest list to image %q", imageID) + } + err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("error deleting image %q after failing to save instance locations for it", img.ID) + } + } + return "", errors.Wrapf(err, "error saving instance list to image %q", imageID) + } + return imageID, nil + } + return "", errors.Wrapf(err, "error creating image to hold manifest list") +} + +// Reference returns an image reference for the composite image being built +// in the list, or an error if the list has never been saved to a local image. +func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { + if l.instances[""] == "" { + return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list") + } + s, err := is.Transport.ParseStoreReference(store, l.instances[""]) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""]) + } + references := make([]types.ImageReference, 0, len(l.instances)) + whichInstances := make([]digest.Digest, 0, len(l.instances)) + switch multiple { + case cp.CopyAllImages, cp.CopySystemImage: + for instance := range l.instances { + if instance != "" { + whichInstances = append(whichInstances, instance) + } + } + case cp.CopySpecificImages: + for instance := range l.instances { + for _, allowed := range instances { + if instance == allowed { + whichInstances = append(whichInstances, instance) + } + } + } + } + for _, instance := range whichInstances { + imageName := l.instances[instance] + ref, err := alltransports.ParseImageName(imageName) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName) + } + references = append(references, ref) + } + return supplemented.Reference(s, references, multiple, instances), nil +} + +// Push saves the manifest list and whichever blobs are needed to a destination location. +func (l *list) Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { + // Load the system signing policy. + pushPolicy, err := signature.DefaultPolicy(options.SystemContext) + if err != nil { + return nil, "", errors.Wrapf(err, "error obtaining default signature policy") + } + + // Override the settings for local storage to make sure that we can always read the source "image". 
+ pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes + + policyContext, err := signature.NewPolicyContext(pushPolicy) + if err != nil { + return nil, "", errors.Wrapf(err, "error creating new signature policy context") + } + defer func() { + if err2 := policyContext.Destroy(); err2 != nil { + logrus.Errorf("error destroying signature policy context: %v", err2) + } + }() + + // If we were given a media type that corresponds to a multiple-images + // type, reset it to a valid corresponding single-image type, since we + // already expect the image library to infer the list type from the + // image type that we're telling it to force. + singleImageManifestType := options.ManifestType + switch singleImageManifestType { + case v1.MediaTypeImageIndex: + singleImageManifestType = v1.MediaTypeImageManifest + case manifest.DockerV2ListMediaType: + singleImageManifestType = manifest.DockerV2Schema2MediaType + } + + // Build a source reference for our list and grab bag full of blobs. + src, err := l.Reference(options.Store, options.ImageListSelection, options.Instances) + if err != nil { + return nil, "", err + } + copyOptions := &cp.Options{ + ImageListSelection: options.ImageListSelection, + Instances: options.Instances, + SourceCtx: options.SystemContext, + DestinationCtx: options.SystemContext, + ReportWriter: options.ReportWriter, + RemoveSignatures: options.RemoveSignatures, + SignBy: options.SignBy, + ForceManifestMIMEType: singleImageManifestType, + } + + // Copy whatever we were asked to copy. + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) + if err != nil { + return nil, "", err + } + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, "", err + } + return nil, manifestDigest, nil +} + +// Add adds information about the specified image to the list, computing the +// image's manifest's digest, retrieving OS and architecture information from +// the image's configuration, and recording the image's reference so that it +// can be found at push-time. Returns the instanceDigest for the image. If +// the reference points to an image list, either all instances are added (if +// "all" is true), or the instance which matches "sys" (if "all" is false) will +// be added. 
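+// A hedged usage sketch, not part of this patch (ctx, sys, and the image name are assumptions): +// +// ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest") +// if err != nil { +// return "", err +// } +// instanceDigest, err := list.Add(ctx, sys, ref, false)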
+func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) { + src, err := ref.NewImageSource(ctx, sys) + if err != nil { + return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref)) + } + defer src.Close() + + type instanceInfo struct { + instanceDigest *digest.Digest + OS, Architecture, OSVersion, Variant string + Features, OSFeatures, Annotations []string + Size int64 + } + var instanceInfos []instanceInfo + var manifestDigest digest.Digest + + primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref)) + } + + if manifest.MIMETypeIsMultiImage(primaryManifestType) { + lists, err := manifests.FromBlob(primaryManifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + if all { + for i, instance := range lists.OCIv1().Manifests { + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceDigest := instance.Digest + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } else { + list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + instanceDigest, err := list.ChooseInstance(sys) + if err != nil { + return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref)) + } + added := false + for i, instance := range lists.OCIv1().Manifests { + if instance.Digest != instanceDigest { + continue + } + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + added = true + } + if !added { + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } + } else { + instanceInfo := instanceInfo{ + instanceDigest: nil, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + + for _, instanceInfo := range instanceInfos { + if instanceInfo.OS == "" || instanceInfo.Architecture == "" { + img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) + if err != nil { + return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref)) + } + config, err := img.OCIConfig(ctx) + if err != nil { + return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref)) + } + if instanceInfo.OS == "" { + instanceInfo.OS = config.OS + } + if instanceInfo.Architecture == "" { + instanceInfo.Architecture = 
config.Architecture + } + } + manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest) + } + if instanceInfo.instanceDigest == nil { + manifestDigest, err = manifest.Digest(manifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref)) + } + instanceInfo.instanceDigest = &manifestDigest + instanceInfo.Size = int64(len(manifestBytes)) + } else { + if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest + } + } + err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) + if err != nil { + return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest) + } + if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { + l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) + } + } + + return manifestDigest, nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := l.List.Remove(instanceDigest) + if err == nil { + if _, needToDelete := l.instances[instanceDigest]; needToDelete { + delete(l.instances, instanceDigest) + } + } + return err +} diff --git a/vendor/github.com/containers/buildah/pkg/manifests/errors.go b/vendor/github.com/containers/buildah/pkg/manifests/errors.go new file mode 100644 index 000000000..8398d7efc --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/manifests/errors.go @@ -0,0 +1,16 @@ +package manifests + +import ( + "errors" +) + +var ( + // ErrDigestNotFound is returned when we look for an image instance + // with a particular digest in a list or index, and fail to find it. + ErrDigestNotFound = errors.New("no image instance matching the specified digest was found in the list or index") + // ErrManifestTypeNotSupported is returned when we attempt to parse a + // manifest with a known MIME type as a list or index, or when we attempt + // to serialize a list or index to a manifest with a MIME type that we + // don't know how to encode. + ErrManifestTypeNotSupported = errors.New("manifest type not supported") +) diff --git a/vendor/github.com/containers/buildah/pkg/manifests/manifests.go b/vendor/github.com/containers/buildah/pkg/manifests/manifests.go new file mode 100644 index 000000000..ea9495ee7 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/manifests/manifests.go @@ -0,0 +1,493 @@ +package manifests + +import ( + "encoding/json" + "os" + + "github.com/containers/image/v5/manifest" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// List is a generic interface for manipulating a manifest list or an image +// index. 
+type List interface { + AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error + Remove(instanceDigest digest.Digest) error + + SetURLs(instanceDigest digest.Digest, urls []string) error + URLs(instanceDigest digest.Digest) ([]string, error) + + SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error + Annotations(instanceDigest *digest.Digest) (map[string]string, error) + + SetOS(instanceDigest digest.Digest, os string) error + OS(instanceDigest digest.Digest) (string, error) + + SetArchitecture(instanceDigest digest.Digest, arch string) error + Architecture(instanceDigest digest.Digest) (string, error) + + SetOSVersion(instanceDigest digest.Digest, osVersion string) error + OSVersion(instanceDigest digest.Digest) (string, error) + + SetVariant(instanceDigest digest.Digest, variant string) error + Variant(instanceDigest digest.Digest) (string, error) + + SetFeatures(instanceDigest digest.Digest, features []string) error + Features(instanceDigest digest.Digest) ([]string, error) + + SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error + OSFeatures(instanceDigest digest.Digest) ([]string, error) + + Serialize(mimeType string) ([]byte, error) + Instances() []digest.Digest + OCIv1() *v1.Index + Docker() *manifest.Schema2List + + findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error) + findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) +} + +type list struct { + docker manifest.Schema2List + oci v1.Index +} + +// Docker returns the list as a Docker schema 2 list. The returned structure should NOT be modified. +func (l *list) Docker() *manifest.Schema2List { + return &l.docker +} + +// OCIv1 returns the list as an OCI image index. The returned structure should NOT be modified. +func (l *list) OCIv1() *v1.Index { + return &l.oci +} + +// Create creates a new list. +func Create() List { + return &list{ + docker: manifest.Schema2List{ + SchemaVersion: 2, + MediaType: manifest.DockerV2ListMediaType, + }, + oci: v1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + }, + } +} + +// AddInstance adds an entry for the specified manifest digest, with assorted +// additional information specified in parameters, to the list or index.
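+// A hedged sketch of adding one entry by hand, not part of this patch (d and size are assumed +// to come from the instance's raw manifest): +// +// err := l.AddInstance(d, size, manifest.DockerV2Schema2MediaType, "linux", "amd64", "", nil, "", nil, nil)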
+func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error { + if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { + return err + } + + schema2platform := manifest.Schema2PlatformSpec{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + Features: features, + } + l.docker.Manifests = append(l.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + }, + Platform: schema2platform, + }) + + ociv1platform := v1.Platform{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + } + l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + Platform: &ociv1platform, + }) + + return nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := errors.Wrapf(os.ErrNotExist, "no instance matching digest %q found in manifest list", instanceDigest) + newDockerManifests := make([]manifest.Schema2ManifestDescriptor, 0, len(l.docker.Manifests)) + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest != instanceDigest { + newDockerManifests = append(newDockerManifests, l.docker.Manifests[i]) + } else { + err = nil + } + } + l.docker.Manifests = newDockerManifests + newOCIv1Manifests := make([]v1.Descriptor, 0, len(l.oci.Manifests)) + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest != instanceDigest { + newOCIv1Manifests = append(newOCIv1Manifests, l.oci.Manifests[i]) + } else { + err = nil + } + } + l.oci.Manifests = newOCIv1Manifests + return err +} + +func (l *list) findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error) { + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest == instanceDigest { + return &l.docker.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no Docker manifest matching digest %q was found in list", instanceDigest.String()) +} + +func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) { + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest == instanceDigest { + return &l.oci.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no OCI manifest matching digest %q was found in list", instanceDigest.String()) +} + +// SetURLs sets the URLs where the manifest might also be found. +func (l *list) SetURLs(instanceDigest digest.Digest, urls []string) error { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci.URLs = append([]string{}, urls...) + docker.URLs = append([]string{}, urls...) + return nil +} + +// URLs retrieves the locations from which this object might possibly be downloaded. +func (l *list) URLs(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.URLs...), nil +} + +// SetAnnotations sets annotations on the image index, or on a specific manifest. 
+// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error { + a := &l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + a = &oci.Annotations + } + (*a) = make(map[string]string) + for k, v := range annotations { + (*a)[k] = v + } + return nil +} + +// Annotations retrieves the annotations which have been set on the image index, or on one instance. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) Annotations(instanceDigest *digest.Digest) (map[string]string, error) { + a := l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return nil, err + } + a = oci.Annotations + } + annotations := make(map[string]string) + for k, v := range a { + annotations[k] = v + } + return annotations, nil +} + +// SetOS sets the OS field in the platform information associated with the instance with the specified digest. +func (l *list) SetOS(instanceDigest digest.Digest, os string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OS = os + oci.Platform.OS = os + return nil +} + +// OS retrieves the OS field in the platform information associated with the instance with the specified digest. +func (l *list) OS(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OS, nil +} + +// SetArchitecture sets the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) SetArchitecture(instanceDigest digest.Digest, arch string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Architecture = arch + oci.Platform.Architecture = arch + return nil +} + +// Architecture retrieves the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) Architecture(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Architecture, nil +} + +// SetOSVersion sets the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) SetOSVersion(instanceDigest digest.Digest, osVersion string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSVersion = osVersion + oci.Platform.OSVersion = osVersion + return nil +} + +// OSVersion retrieves the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) OSVersion(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OSVersion, nil +} + +// SetVariant sets the Variant field in the platform information associated with the instance with the specified digest. 
+func (l *list) SetVariant(instanceDigest digest.Digest, variant string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Variant = variant + oci.Platform.Variant = variant + return nil +} + +// Variant retrieves the Variant field in the platform information associated with the instance with the specified digest. +func (l *list) Variant(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Variant, nil +} + +// SetFeatures sets the features list in the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) SetFeatures(instanceDigest digest.Digest, features []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + docker.Platform.Features = append([]string{}, features...) + // no OCI equivalent + return nil +} + +// Features retrieves the features list from the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) Features(instanceDigest digest.Digest) ([]string, error) { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, docker.Platform.Features...), nil +} + +// SetOSFeatures sets the OS features list in the platform information associated with the instance with the specified digest. +func (l *list) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSFeatures = append([]string{}, osFeatures...) + oci.Platform.OSFeatures = append([]string{}, osFeatures...) + return nil +} + +// OSFeatures retrieves the OS features list from the platform information associated with the instance with the specified digest. +func (l *list) OSFeatures(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.Platform.OSFeatures...), nil +} + +// FromBlob builds a list from an encoded manifest list or image index. 
+func FromBlob(manifestBytes []byte) (List, error) { + manifestType := manifest.GuessMIMEType(manifestBytes) + list := &list{ + docker: manifest.Schema2List{ + SchemaVersion: 2, + MediaType: manifest.DockerV2ListMediaType, + }, + oci: v1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + }, + } + switch manifestType { + default: + return nil, errors.Wrapf(ErrManifestTypeNotSupported, "unable to load manifest list: unsupported format %q", manifestType) + case manifest.DockerV2ListMediaType: + if err := json.Unmarshal(manifestBytes, &list.docker); err != nil { + return nil, errors.Wrapf(err, "unable to parse Docker manifest list from image") + } + for _, m := range list.docker.Manifests { + list.oci.Manifests = append(list.oci.Manifests, v1.Descriptor{ + MediaType: m.Schema2Descriptor.MediaType, + Size: m.Schema2Descriptor.Size, + Digest: m.Schema2Descriptor.Digest, + Platform: &v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }, + }) + } + case v1.MediaTypeImageIndex: + if err := json.Unmarshal(manifestBytes, &list.oci); err != nil { + return nil, errors.Wrapf(err, "unable to parse OCIv1 manifest list") + } + for _, m := range list.oci.Manifests { + platform := m.Platform + if platform == nil { + platform = &v1.Platform{} + } + list.docker.Manifests = append(list.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: m.MediaType, + Size: m.Size, + Digest: m.Digest, + }, + Platform: manifest.Schema2PlatformSpec{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }, + }) + } + } + return list, nil +} + +func (l *list) preferOCI() bool { + // If we have any data that's only in the OCI format, use that. + for _, m := range l.oci.Manifests { + if len(m.URLs) > 0 { + return true + } + if len(m.Annotations) > 0 { + return true + } + } + // If we have any data that's only in the Docker format, use that. + for _, m := range l.docker.Manifests { + if len(m.Platform.Features) > 0 { + return false + } + } + // If we have no manifests, remember that the Docker format is + // explicitly typed, so use that. Otherwise, default to using the OCI + // format. + return len(l.docker.Manifests) != 0 +} + +// Serialize encodes the list using the specified format, or by selecting one +// which it thinks is appropriate. 
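Together, FromBlob and Serialize (below) form a round trip between the two list formats; preferOCI only matters when the caller passes an empty MIME type. A minimal sketch, assuming the exported `List` interface exposes `Serialize`:

```go
// Illustrative sketch only: parse a raw list, force OCI image index output.
func reencodeAsOCI(raw []byte) ([]byte, error) {
	lst, err := FromBlob(raw)
	if err != nil {
		return nil, err
	}
	// Passing "" here would let preferOCI choose the format instead.
	return lst.Serialize(v1.MediaTypeImageIndex)
}
```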
+func (l *list) Serialize(mimeType string) ([]byte, error) {
+	var manifestBytes []byte
+	switch mimeType {
+	case "":
+		if l.preferOCI() {
+			manifest, err := json.Marshal(&l.oci)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error marshalling OCI image index")
+			}
+			manifestBytes = manifest
+		} else {
+			manifest, err := json.Marshal(&l.docker)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+			}
+			manifestBytes = manifest
+		}
+	case v1.MediaTypeImageIndex:
+		manifest, err := json.Marshal(&l.oci)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error marshalling OCI image index")
+		}
+		manifestBytes = manifest
+	case manifest.DockerV2ListMediaType:
+		manifest, err := json.Marshal(&l.docker)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error marshalling Docker manifest list")
+		}
+		manifestBytes = manifest
+	default:
+		return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType)
+	}
+	return manifestBytes, nil
+}
+
+// Instances returns the list of image instances mentioned in this list.
+func (l *list) Instances() []digest.Digest {
+	instances := make([]digest.Digest, 0, len(l.oci.Manifests))
+	for _, instance := range l.oci.Manifests {
+		instances = append(instances, instance.Digest)
+	}
+	return instances
+}
diff --git a/vendor/github.com/containers/buildah/pkg/supplemented/errors.go b/vendor/github.com/containers/buildah/pkg/supplemented/errors.go
new file mode 100644
index 000000000..6de679b50
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/supplemented/errors.go
@@ -0,0 +1,17 @@
+package supplemented
+
+import (
+	"errors"
+
+	"github.com/containers/buildah/pkg/manifests"
+)
+
+var (
+	// ErrDigestNotFound is returned when we look for an image instance
+	// with a particular digest in a list or index, and fail to find it.
+	ErrDigestNotFound = manifests.ErrDigestNotFound
+	// ErrBlobNotFound is returned when we try to figure out which supplemental
+	// image we should ask for a blob with the specified characteristics,
+	// based on the information in each of the supplemental images' manifests.
+	ErrBlobNotFound = errors.New("location of blob could not be determined")
+)
diff --git a/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go b/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
new file mode 100644
index 000000000..5e3c6291a
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
@@ -0,0 +1,400 @@
+package supplemented
+
+import (
+	"container/list"
+	"context"
+	"io"
+
+	cp "github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	multierror "github.com/hashicorp/go-multierror"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// supplementedImageReference groups multiple references together.
+type supplementedImageReference struct {
+	types.ImageReference
+	references []types.ImageReference
+	multiple   cp.ImageListSelection
+	instances  []digest.Digest
+}
+
+// supplementedImageSource represents an image, plus all of the blobs of other images.
+type supplementedImageSource struct {
+	types.ImageSource
+	reference    types.ImageReference
+	manifest     []byte // The manifest list or image index.
+	manifestType string // The MIME type of the manifest list or image index.
+ sourceDefaultInstances map[types.ImageSource]digest.Digest // The default manifest instances of open ImageSource objects. + sourceInstancesByInstance map[digest.Digest]types.ImageSource // A map from manifest instance digests to open ImageSource objects. + instancesByBlobDigest map[digest.Digest]digest.Digest // A map from blob digests to manifest instance digests. +} + +// Reference groups one reference and some number of additional references +// together as a group. The first reference's default instance will be treated +// as the default instance of the resulting reference, with the other +// references' instances made available as instances for their respective +// digests. +func Reference(ref types.ImageReference, supplemental []types.ImageReference, multiple cp.ImageListSelection, instances []digest.Digest) types.ImageReference { + if len(instances) > 0 { + i := make([]digest.Digest, len(instances)) + copy(i, instances) + instances = i + } + return &supplementedImageReference{ + ImageReference: ref, + references: append([]types.ImageReference{}, supplemental...), + multiple: multiple, + instances: instances, + } +} + +// NewImage returns a new higher-level view of the image. +func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := s.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error building a new Image using an ImageSource") + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource opens the referenced images, scans their manifests for +// instances, and builds mappings from each blob mentioned in them to their +// instances. +func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (iss types.ImageSource, err error) { + sources := make(map[digest.Digest]types.ImageSource) + defaultInstances := make(map[types.ImageSource]digest.Digest) + instances := make(map[digest.Digest]digest.Digest) + var sis *supplementedImageSource + + // Open the default instance for reading. + top, err := s.ImageReference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(s.ImageReference)) + } + + defer func() { + if err != nil { + if iss != nil { + // The composite source has been created. Use its Close method. + if err2 := iss.Close(); err2 != nil { + logrus.Errorf("error opening image: %v", err2) + } + } else if top != nil { + // The composite source has not been created, but the top was already opened. Close it. + if err2 := top.Close(); err2 != nil { + logrus.Errorf("error opening image: %v", err2) + } + } + } + }() + + var addSingle, addMulti func(manifestBytes []byte, manifestType string, src types.ImageSource) error + type manifestToRead struct { + src types.ImageSource + instance *digest.Digest + } + manifestsToRead := list.New() + + addSingle = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing digest over manifest %q", string(manifestBytes)) + } + sources[manifestDigest] = src + + // Parse the manifest as a single image. 
+ man, err := manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest %q", string(manifestBytes)) + } + + // Log the config blob's digest and the blobs of its layers as associated with this manifest. + config := man.ConfigInfo() + if config.Digest != "" { + instances[config.Digest] = manifestDigest + logrus.Debugf("blob %q belongs to %q", config.Digest, manifestDigest) + } + + layers := man.LayerInfos() + for _, layer := range layers { + instances[layer.Digest] = manifestDigest + logrus.Debugf("layer %q belongs to %q", layer.Digest, manifestDigest) + } + + return nil + } + + addMulti = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + sources[manifestDigest] = src + + // Parse the manifest as a list of images. + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(manifestBytes), manifestType) + } + + // Figure out which of its instances we want to look at. + var chaseInstances []digest.Digest + switch s.multiple { + case cp.CopySystemImage: + instance, err := list.ChooseInstance(sys) + if err != nil { + return errors.Wrapf(err, "error selecting appropriate instance from list") + } + chaseInstances = []digest.Digest{instance} + case cp.CopySpecificImages: + chaseInstances = s.instances + case cp.CopyAllImages: + chaseInstances = list.Instances() + } + + // Queue these manifest instances for reading from this + // ImageSource later, if we don't stumble across them somewhere + // else first. + for _, instanceIterator := range chaseInstances { + instance := instanceIterator + next := &manifestToRead{ + src: src, + instance: &instance, + } + if src == top { + // Prefer any other source. + manifestsToRead.PushBack(next) + } else { + // Prefer this source over the first ("main") one. + manifestsToRead.PushFront(next) + } + } + return nil + } + + visitedReferences := make(map[types.ImageReference]struct{}) + for i, ref := range append([]types.ImageReference{s.ImageReference}, s.references...) { + if _, visited := visitedReferences[ref]; visited { + continue + } + visitedReferences[ref] = struct{}{} + + // Open this image for reading. + var src types.ImageSource + if ref == s.ImageReference { + src = top + } else { + src, err = ref.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(ref)) + } + } + + // Read the default manifest for the image. + manifestBytes, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error reading default manifest from image %q", transports.ImageName(ref)) + } + + // If this is the first image, mark it as our starting point. + if i == 0 { + sources[""] = src + + sis = &supplementedImageSource{ + ImageSource: top, + reference: s, + manifest: manifestBytes, + manifestType: manifestType, + sourceDefaultInstances: defaultInstances, + sourceInstancesByInstance: sources, + instancesByBlobDigest: instances, + } + iss = sis + } + + // Record the digest of the ImageSource's default instance's manifest. 
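Stepping out of the function body for a moment: the point of this bookkeeping is that a manifest list plus its per-architecture images can be handed to containers/image's copy machinery as a single source. A rough caller-side sketch, with every reference value a placeholder (`signature` here is assumed to be github.com/containers/image/v5/signature):

```go
// Illustrative sketch only: copy a list and all of its instances at once.
func pushAll(ctx context.Context, pc *signature.PolicyContext,
	listRef types.ImageReference, extras []types.ImageReference,
	dest types.ImageReference) error {
	src := supplemented.Reference(listRef, extras, cp.CopyAllImages, nil)
	_, err := cp.Image(ctx, pc, dest, src, &cp.Options{
		ImageListSelection: cp.CopyAllImages,
	})
	return err
}
```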
+ manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "error computing digest of manifest from image %q", transports.ImageName(ref)) + } + sis.sourceDefaultInstances[src] = manifestDigest + + // If the ImageSource's default manifest is a list, parse each of its instances. + if manifest.MIMETypeIsMultiImage(manifestType) { + if err = addMulti(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding multi-image %q", transports.ImageName(ref)) + } + } else { + if err = addSingle(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding single image %q", transports.ImageName(ref)) + } + } + } + + // Parse the rest of the instances. + for manifestsToRead.Front() != nil { + front := manifestsToRead.Front() + value := front.Value + manifestToRead, ok := value.(*manifestToRead) + if !ok { + panic("bug: wrong type looking for *manifestToRead in list?") + } + manifestsToRead.Remove(front) + + // If we already read this manifest, no need to read it again. + if _, alreadyRead := sources[*manifestToRead.instance]; alreadyRead { + continue + } + + // Read the instance's manifest. + manifestBytes, manifestType, err := manifestToRead.src.GetManifest(ctx, manifestToRead.instance) + if err != nil { + // if errors.Cause(err) == storage.ErrImageUnknown || os.IsNotExist(errors.Cause(err)) { + // Trust that we either don't need it, or that it's in another reference. + // continue + // } + return nil, errors.Wrapf(err, "error reading manifest for instance %q", manifestToRead.instance) + } + + if manifest.MIMETypeIsMultiImage(manifestType) { + // Add the list's contents. + if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil { + return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + } + } else { + // Add the single image's contents. 
+ if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil { + return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + } + } + } + + return iss, nil +} + +func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("deletion of images not implemented") +} + +func (s *supplementedImageSource) Close() error { + var returnErr *multierror.Error + closed := make(map[types.ImageSource]struct{}) + for _, sourceInstance := range s.sourceInstancesByInstance { + if _, closed := closed[sourceInstance]; closed { + continue + } + if err := sourceInstance.Close(); err != nil { + returnErr = multierror.Append(returnErr, err) + } + closed[sourceInstance] = struct{}{} + } + if returnErr == nil { + return nil + } + return returnErr.ErrorOrNil() +} + +func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + return s.manifest, s.manifestType, nil + } + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + if *instanceDigest == s.sourceDefaultInstances[sourceInstance] { + requestInstanceDigest = nil + } + return sourceInstance.GetManifest(ctx, requestInstanceDigest) + } + return nil, "", errors.Wrapf(ErrDigestNotFound, "error getting manifest for digest %q", *instanceDigest) +} + +func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) { + sourceInstance, ok := s.instancesByBlobDigest[blob.Digest] + if !ok { + return nil, -1, errors.Wrapf(ErrBlobNotFound, "error blob %q in known instances", blob.Digest) + } + src, ok := s.sourceInstancesByInstance[sourceInstance] + if !ok { + return nil, -1, errors.Wrapf(ErrDigestNotFound, "error getting image source for instance %q", sourceInstance) + } + return src.GetBlob(ctx, blob, bic) +} + +func (s *supplementedImageSource) HasThreadSafeGetBlob() bool { + checked := make(map[types.ImageSource]struct{}) + for _, sourceInstance := range s.sourceInstancesByInstance { + if _, checked := checked[sourceInstance]; checked { + continue + } + if !sourceInstance.HasThreadSafeGetBlob() { + return false + } + checked[sourceInstance] = struct{}{} + } + return true +} + +func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var ( + src types.ImageSource + digest digest.Digest + ) + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + digest = *instanceDigest + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + return src.GetSignatures(ctx, requestInstanceDigest) + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to read signatures", digest) +} + +func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + var src types.ImageSource + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + if sourceInstance, ok := 
s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer infos for copy from instance %q", instanceDigest) + } + var manifestDigest digest.Digest + if instanceDigest != nil { + manifestDigest = *instanceDigest + } + for _, blobInfo := range blobInfos { + s.instancesByBlobDigest[blobInfo.Digest] = manifestDigest + } + return blobInfos, nil + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", *instanceDigest) +} diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index c807441cf..0d92a1028 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.16.3 +1.16.5 diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 84bfc9616..05c1450c0 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -5,8 +5,8 @@ require ( github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 github.com/Microsoft/hcsshim v0.8.7 github.com/docker/go-units v0.4.0 - github.com/klauspost/compress v1.10.2 - github.com/klauspost/pgzip v1.2.1 + github.com/klauspost/compress v1.10.3 + github.com/klauspost/pgzip v1.2.2 github.com/mattn/go-shellwords v1.0.10 github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/opencontainers/go-digest v1.0.0-rc1 diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index 5d86e062f..30183eb00 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -39,8 +39,12 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.2 h1:8d4I0LDiieuGngsqlqOih9ker/NS0LX4V0i+EhiFWg0= +github.com/klauspost/pgzip v1.2.2/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw= diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index dc2e0c199..a188c510d 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -1,7 +1,6 @@ package fileutils import ( - "errors" "fmt" "io" "os" @@ -10,6 +9,7 @@ import ( 
"strings" "text/scanner" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -358,6 +358,21 @@ func ReadSymlinkedDirectory(path string) (string, error) { return realPath, nil } +// ReadSymlinkedPath returns the target directory of a symlink. +// The target of the symbolic link can be a file and a directory. +func ReadSymlinkedPath(path string) (realPath string, err error) { + if realPath, err = filepath.Abs(path); err != nil { + return "", errors.Wrapf(err, "unable to get absolute path for %q", path) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", errors.Wrapf(err, "failed to canonicalise path for %q", path) + } + if _, err := os.Stat(realPath); err != nil { + return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path) + } + return realPath, nil +} + // CreateIfNotExists creates a file or a directory only if it does not already exist. func CreateIfNotExists(path string, isDir bool) error { if _, err := os.Stat(path); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go index 4276d63af..4b888dceb 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mount.go +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -56,10 +56,11 @@ func Mounted(mountpoint string) (bool, error) { return false, err } - mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint) + mountpoint, err = fileutils.ReadSymlinkedPath(mountpoint) if err != nil { return false, err } + // Search the table for the mountpoint for _, e := range entries { if e.Mountpoint == mountpoint { diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go index 26c8b42c1..a9a0dd751 100644 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -20,5 +20,5 @@ func IsProcessAlive(pid int) bool { // KillProcess force-stops a process. 
 func KillProcess(pid int) {
-	unix.Kill(pid, unix.SIGKILL)
+	_ = unix.Kill(pid, unix.SIGKILL)
 }
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 618c1dc75..510e71428 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -7,6 +7,7 @@ import (
 
 	"github.com/containers/storage/pkg/mount"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
@@ -29,7 +30,9 @@ func EnsureRemoveAll(dir string) error {
 	maxRetry := 100
 
 	// Attempt to unmount anything beneath this dir first
-	mount.RecursiveUnmount(dir)
+	if err := mount.RecursiveUnmount(dir); err != nil {
+		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
+	}
 
 	for {
 		err := os.RemoveAll(dir)
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 49699b263..9ff84c666 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -3308,6 +3308,9 @@ const defaultConfigFile = "/etc/containers/storage.conf"
 // DefaultConfigFile returns the path to the storage config file used
 func DefaultConfigFile(rootless bool) (string, error) {
 	if rootless {
+		if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+			return filepath.Join(configHome, "containers/storage.conf"), nil
+		}
 		home := homedir.Get()
 		if home == "" {
 			return "", errors.New("cannot determine user's homedir")
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index f1e94fd2b..406032961 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -10,7 +10,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/BurntSushi/toml"
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
@@ -158,23 +157,6 @@ func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) {
 	return opts, nil
 }
 
-func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
-	config := new(tomlConfig)
-
-	config.Storage.Driver = storeOptions.GraphDriverName
-	config.Storage.RunRoot = storeOptions.RunRoot
-	config.Storage.GraphRoot = storeOptions.GraphRoot
-	config.Storage.RootlessStoragePath = storeOptions.RootlessStoragePath
-	for _, i := range storeOptions.GraphDriverOptions {
-		s := strings.Split(i, "=")
-		if s[0] == "overlay.mount_program" {
-			config.Storage.Options.MountProgram = s[1]
-		}
-	}
-
-	return config
-}
-
 func getRootlessUID() int {
 	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
 	if uidEnv != "" {
@@ -244,23 +226,6 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
 			rootlessStoragePath = strings.Replace(rootlessStoragePath, "$USER", usr.Username, -1)
 			storageOpts.GraphRoot = rootlessStoragePath
 		}
-	} else {
-		if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
-			return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
-		}
-		file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
-		if err != nil {
-			return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf)
-		}
-
-		tomlConfiguration := getTomlStorage(&storageOpts)
-		defer file.Close()
-		enc := toml.NewEncoder(file)
-		if err := enc.Encode(tomlConfiguration); err != nil {
-			os.Remove(storageConf)
-
-			return
storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf) - } } } return storageOpts, nil diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 73ac3c630..86553c2c3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -66,7 +66,7 @@ var ( // A Decoder can be used in two modes: // // 1) As a stream, or -// 2) For stateless decoding using DecodeAll or DecodeBuffer. +// 2) For stateless decoding using DecodeAll. // // Only a single stream can be decoded concurrently, but the same decoder // can run multiple concurrent stateless decodes. It is even possible to diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..4375e08b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,521 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
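Before the table-maintenance code below, the core data-structure idea in isolation: prevEntry turns every long-table slot into a two-deep chain, so each lookup can test both the newest and the previous position that hashed to the same slot. A toy sketch with simplified types, not the vendored code itself:

```go
type chain2 struct{ offset, prev int32 }

// insert keeps the old head as prev, giving a chain of length 2.
func insert(table []chain2, h uint32, off int32) {
	table[h] = chain2{offset: off, prev: table[h].offset}
}

// candidates returns both positions worth testing for this hash.
func candidates(table []chain2, h uint32) (newest, previous int32) {
	e := table[h]
	return e.offset, e.prev
}
```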
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + stepSize := int32(e.o.targetLength) + if stepSize == 0 { + stepSize++ + } + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. 
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.Encode(blk, src) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 0ffea7655..d640e6a9f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -172,55 +172,6 @@ encodeLoop: cv = load6432(src, s) continue } - const repOff2 = 1 - // We deviate from the reference encoder and also check offset 2. - // Slower and not consistently better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff2*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff2, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } } // Find the offsets of our two matches. coffsetL := s - (candidateL.offset - e.cur) @@ -372,7 +323,7 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashS := hash5(cv, dFastShortTableBits) nextHashL := hash8(cv, dFastLongTableBits) // We have at least 4 byte match. 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 28134b158..1387b8082 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -6,6 +6,7 @@ package zstd import ( "fmt" + "math" "math/bits" "github.com/klauspost/compress/zstd/internal/xxhash" @@ -23,7 +24,7 @@ type tableEntry struct { offset int32 } -type fastEncoder struct { +type fastBase struct { o encParams // cur is the offset at the start of hist cur int32 @@ -31,18 +32,22 @@ type fastEncoder struct { maxMatchOff int32 hist []byte crc *xxhash.Digest - table [tableSize]tableEntry tmp [8]byte blk *blockEnc } +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + // CRC returns the underlying CRC writer. -func (e *fastEncoder) CRC() *xxhash.Digest { +func (e *fastBase) CRC() *xxhash.Digest { return e.crc } // AppendCRC will append the CRC to the destination slice and return it. -func (e *fastEncoder) AppendCRC(dst []byte) []byte { +func (e *fastBase) AppendCRC(dst []byte) []byte { crc := e.crc.Sum(e.tmp[:0]) dst = append(dst, crc[7], crc[6], crc[5], crc[4]) return dst @@ -50,7 +55,7 @@ func (e *fastEncoder) AppendCRC(dst []byte) []byte { // WindowSize returns the window size of the encoder, // or a window size small enough to contain the input size, if > 0. -func (e *fastEncoder) WindowSize(size int) int32 { +func (e *fastBase) WindowSize(size int) int32 { if size > 0 && size < int(e.maxMatchOff) { b := int32(1) << uint(bits.Len(uint(size))) // Keep minimum window. @@ -63,7 +68,7 @@ func (e *fastEncoder) WindowSize(size int) int32 { } // Block returns the current block. -func (e *fastEncoder) Block() *blockEnc { +func (e *fastBase) Block() *blockEnc { return e.blk } @@ -169,9 +174,22 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+6, repIndex+4, src) + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -197,11 +215,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + 2 + s += length + 2 nextEmit = s if s >= sLimit { if debug { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -257,7 +275,20 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } // Extend backwards tMin := s - e.maxMatchOff @@ -294,7 +325,20 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } // Store this, since we have it. nextHash := hash6(cv, hashLog) @@ -412,10 +456,23 @@ encodeLoop: if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - // lenght := 4 + e.matchlen(s+6, repIndex+4, src) - lenght := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + // length := 4 + e.matchlen(s+6, repIndex+4, src) + // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + var length int32 + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -441,11 +498,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + 2 + s += length + 2 nextEmit = s if s >= sLimit { if debug { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -498,7 +555,20 @@ encodeLoop: // Extend the 4-byte match as long as possible. //l := e.matchlenNoHist(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } // Extend backwards tMin := s - e.maxMatchOff @@ -536,7 +606,20 @@ encodeLoop: // We have at least 4 byte match. // No need to check backwards. We come straight from a match //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } // Store this, since we have it. nextHash := hash6(cv, hashLog) @@ -571,7 +654,7 @@ encodeLoop: } } -func (e *fastEncoder) addBlock(src []byte) int32 { +func (e *fastBase) addBlock(src []byte) int32 { if debugAsserts && e.cur > bufferReset { panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) } @@ -602,17 +685,17 @@ func (e *fastEncoder) addBlock(src []byte) int32 { // useBlock will replace the block with the provided one, // but transfer recent offsets from the previous. -func (e *fastEncoder) UseBlock(enc *blockEnc) { +func (e *fastBase) UseBlock(enc *blockEnc) { enc.reset(e.blk) e.blk = enc } -func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 { +func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { // Extend the match to be as long as possible. 
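All of the inlined blocks above instantiate one trick: XOR eight bytes at a time, and when the words differ, `bits.TrailingZeros64(diff)>>3` converts the position of the first differing bit into a byte count (the loads are little-endian, so low bits correspond to earlier bytes). A self-contained version of the idea, using encoding/binary in place of the package's unexported load64:

```go
import (
	"encoding/binary"
	"math/bits"
)

// matchLen8 returns how many leading bytes of a and b are equal.
func matchLen8(a, b []byte) int {
	var i int
	for ; i+8 <= len(a) && i+8 <= len(b); i += 8 {
		x := binary.LittleEndian.Uint64(a[i:]) ^ binary.LittleEndian.Uint64(b[i:])
		if x != 0 {
			// First differing byte = trailing zero bits / 8.
			return i + bits.TrailingZeros64(x)>>3
		}
	}
	for i < len(a) && i < len(b) && a[i] == b[i] {
		i++
	}
	return i
}
```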
return int32(matchLen(src[s:], src[t:])) } -func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 { +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { if debugAsserts { if s < 0 { err := fmt.Sprintf("s (%d) < 0", s) @@ -626,18 +709,17 @@ func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 { err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) panic(err) } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } } // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. -func (e *fastEncoder) Reset() { +func (e *fastBase) Reset() { if e.blk == nil { e.blk = &blockEnc{} e.blk.init() diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 4032fb9fc..67d45efb9 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -71,15 +71,14 @@ func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { } if w != nil { e.Reset(w) - } else { - e.init.Do(func() { - e.initialize() - }) } return &e, nil } func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } e.encoders = make(chan encoder, e.o.concurrent) for i := 0; i < e.o.concurrent; i++ { e.encoders <- e.o.encoder() @@ -89,9 +88,6 @@ func (e *Encoder) initialize() { // Reset will re-initialize the writer and new writes will encode to the supplied writer // as a new, independent stream. func (e *Encoder) Reset(w io.Writer) { - e.init.Do(func() { - e.initialize() - }) s := &e.state s.wg.Wait() s.wWg.Wait() @@ -422,10 +418,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(func() { - e.o.setDefault() - e.initialize() - }) + e.init.Do(e.initialize) enc := <-e.encoders defer func() { // Release encoder reference to last block. diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 40eb45733..0ff970dac 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -39,9 +39,11 @@ func (o *encoderOptions) setDefault() { func (o encoderOptions) encoder() encoder { switch o.level { case SpeedDefault: - return &doubleFastEncoder{fastEncoder: fastEncoder{maxMatchOff: int32(o.windowSize)}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + case SpeedBetterCompression: + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} case SpeedFastest: - return &fastEncoder{maxMatchOff: int32(o.windowSize)} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} } panic("unknown compression level") } @@ -67,7 +69,7 @@ func WithEncoderConcurrency(n int) EOption { } // WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between WindowSizeMin and WindowSizeMax. +// The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. 
// The default value is determined by the compression level.
@@ -130,18 +132,18 @@ const (
 	// This is roughly equivalent to the default Zstandard mode (level 3).
 	SpeedDefault
 
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// By using this, note that CPU usage may go up in the future.
+	SpeedBetterCompression
+
 	// speedLast should be kept as the last actual compression option.
 	// This is not for external usage, but is used to keep track of the valid options.
 	speedLast
 
-	// SpeedBetterCompression will (in the future) yield better compression than the default,
-	// but at approximately 4x the CPU usage of the default.
-	// For now this is not implemented.
-	SpeedBetterCompression = SpeedDefault
-
 	// SpeedBestCompression will choose the best available compression option.
 	// For now this is not implemented.
-	SpeedBestCompression = SpeedDefault
+	SpeedBestCompression = SpeedBetterCompression
 )
 
 // EncoderLevelFromString will convert a string representation of an encoding level back
@@ -163,8 +165,10 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
 	switch {
 	case level < 3:
 		return SpeedFastest
-	case level >= 3:
+	case level >= 3 && level < 6:
 		return SpeedDefault
+	case level > 5:
+		return SpeedBetterCompression
 	}
 	return SpeedDefault
 }
@@ -176,6 +180,8 @@ func (e EncoderLevel) String() string {
 		return "fastest"
 	case SpeedDefault:
 		return "default"
+	case SpeedBetterCompression:
+		return "better"
 	default:
 		return "invalid"
 	}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 5e0b64ccc..0807719c8 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -87,6 +87,17 @@ func printf(format string, a ...interface{}) {
 	}
 }
 
+// matchLenFast does matching, but will not match the last up to 7 bytes.
+func matchLenFast(a, b []byte) int {
+	endI := len(a) & (math.MaxInt32 - 7)
+	for i := 0; i < endI; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + bits.TrailingZeros64(diff)>>3
+		}
+	}
+	return endI
+}
+
 // matchLen returns the maximum length.
 // a must be the shortest of the two.
 // The function also returns whether all bytes matched.
@@ -97,33 +108,18 @@ func matchLen(a, b []byte) int {
 			return i + (bits.TrailingZeros64(diff) >> 3)
 		}
 	}
+
 	checked := (len(a) >> 3) << 3
 	a = a[checked:]
 	b = b[checked:]
-	// TODO: We could do a 4 check.
 	for i := range a {
 		if a[i] != b[i] {
-			return int(i) + checked
+			return i + checked
 		}
 	}
 	return len(a) + checked
 }
 
-// matchLen returns a match length in src between index s and t
-func matchLenIn(src []byte, s, t int32) int32 {
-	s1 := len(src)
-	b := src[t:]
-	a := src[s:s1]
-	b = b[:len(a)]
-	// Extend the match to be as long as possible.
-	for i := range a {
-		if a[i] != b[i] {
-			return int32(i)
-		}
-	}
-	return int32(len(a))
-}
-
 func load3232(b []byte, i int32) uint32 {
 	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
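With SpeedBetterCompression now a real level rather than an alias for SpeedDefault, opting in looks like the following sketch (passing a nil writer to NewWriter is the documented way to use EncodeAll; `payload` is a placeholder):

```go
// Illustrative sketch only: one-shot compression at the new "better" level.
enc, err := zstd.NewWriter(nil,
	zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
if err != nil {
	log.Fatal(err)
}
defer enc.Close()
compressed := enc.EncodeAll(payload, nil)
```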
 	b = b[i:]
diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md
index 81000996c..171b978fd 100644
--- a/vendor/github.com/klauspost/pgzip/README.md
+++ b/vendor/github.com/klauspost/pgzip/README.md
@@ -39,7 +39,6 @@ You might need to get/update the dependencies:
 
 ```
 go get -u github.com/klauspost/compress
-go get -u github.com/klauspost/crc32
 ```
 
 Usage
@@ -65,7 +64,7 @@ Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress
 ## Compression
 The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip).
 
-To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. Default values for this is SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be processing at once before the writer blocks.
+To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. The default is SetConcurrency(1MB, runtime.GOMAXPROCS(0)), meaning blocks are split at 1 MB and up to GOMAXPROCS blocks can be compressed in parallel before the writer blocks.
 
 Example:
@@ -99,19 +98,19 @@ See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gz
 
 Compression cost is usually about 0.2% with default settings with a block size of 250k.
 
-Example with GOMAXPROC set to 8 (quad core with 8 hyperthreads)
+Example with GOMAXPROCS set to 32 (16-core CPU)
 
 Content is [Matt Mahoney's 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6.
 
 Compressor  | MB/sec   | speedup | size | size overhead (lower=better)
 ------------|----------|---------|------|---------
-[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0%
-[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11%
-[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s|7.04x | 4784121440 | -0.052%
-[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889%
-[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096%
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 15.44MB/s (1 thread) | 1.0x | 4781329307 | 0%
+[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 135.04MB/s (1 thread) | 8.74x | 4894858258 | +2.37%
+[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 1573.23MB/s| 101.9x | 4902285651 | +2.53%
+[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 361.40MB/s | 23.4x | 4869686090 | +1.85%
+[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 306.01MB/s | 19.8x | 4786890417 | +0.12%
 
-pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode, that will allow compression at ~150MB per core per second, independent of the content.
+pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression-huffman-only) mode that allows compression at ~250MB per core per second, independent of the content.
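For readers skimming the README change above: a minimal sketch of the SetConcurrency call with the new defaults spelled out explicitly. The output path and payload are illustrative, not part of the patch; after this bump, calling SetConcurrency is optional, since the constructor already picks these values.

```go
package main

import (
	"log"
	"os"
	"runtime"

	"github.com/klauspost/pgzip"
)

func main() {
	f, err := os.Create("out.gz") // illustrative output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	w := pgzip.NewWriter(f)
	// Mirror the new defaults explicitly: 1 MB blocks, one in-flight
	// block per CPU thread reported by GOMAXPROCS.
	if err := w.SetConcurrency(1<<20, runtime.GOMAXPROCS(0)); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("payload to compress")); err != nil {
		log.Fatal(err)
	}
	// Close flushes pending blocks and writes the gzip trailer.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```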
See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings.
diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go
index 85d14e9cb..bb2e33941 100644
--- a/vendor/github.com/klauspost/pgzip/gzip.go
+++ b/vendor/github.com/klauspost/pgzip/gzip.go
@@ -11,6 +11,7 @@ import (
 	"hash"
 	"hash/crc32"
 	"io"
+	"runtime"
 	"sync"
 	"time"
@@ -18,9 +19,9 @@ import (
 )
 
 const (
-	defaultBlockSize = 256 << 10
+	defaultBlockSize = 1 << 20
 	tailSize         = 16384
-	defaultBlocks    = 16
+	defaultBlocks    = 4
 )
 
 // These constants are copied from the flate package, so that code that imports
@@ -68,8 +69,8 @@ type result struct {
 // With this you can control the approximate size of your blocks,
 // as well as how many you want to be processing in parallel.
 //
-// Default values for this is SetConcurrency(250000, 16),
-// meaning blocks are split at 250000 bytes and up to 16 blocks
+// The default is SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)),
+// meaning blocks are split at 1 MB and up to GOMAXPROCS blocks
 // can be processing at once before the writer blocks.
 func (z *Writer) SetConcurrency(blockSize, blocks int) error {
 	if blockSize <= tailSize {
@@ -115,7 +116,7 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
 		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
 	}
 	z := new(Writer)
-	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0))
 	z.init(w, level)
 	return z, nil
 }
@@ -174,7 +175,7 @@ func (z *Writer) Reset(w io.Writer) {
 	if z.results != nil && !z.closed {
 		close(z.results)
 	}
-	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0))
 	z.init(w, z.level)
 }
 
@@ -239,36 +240,36 @@ func (z *Writer) writeString(s string) (err error) {
 // compressCurrent will compress the data currently buffered
 // This should only be called from the main writer/flush/closer
 func (z *Writer) compressCurrent(flush bool) {
+	c := z.currentBuffer
+	if len(c) > z.blockSize {
+		// This can never happen through the public interface.
+		panic("len(z.currentBuffer) > z.blockSize (most likely due to concurrent Write race)")
+	}
+
 	r := result{}
 	r.result = make(chan []byte, 1)
 	r.notifyWritten = make(chan struct{}, 0)
+	// Reserve a result slot
 	select {
 	case z.results <- r:
 	case <-z.pushedErr:
 		return
 	}
 
-	// If block given is more than twice the block size, split it.
-	c := z.currentBuffer
-	if len(c) > z.blockSize*2 {
-		c = c[:z.blockSize]
-		z.wg.Add(1)
-		go z.compressBlock(c, z.prevTail, r, false)
-		z.prevTail = c[len(c)-tailSize:]
-		z.currentBuffer = z.currentBuffer[z.blockSize:]
-		z.compressCurrent(flush)
-		// Last one flushes if needed
-		return
-	}
-
 	z.wg.Add(1)
-	go z.compressBlock(c, z.prevTail, r, z.closed)
+	tail := z.prevTail
 	if len(c) > tailSize {
-		z.prevTail = c[len(c)-tailSize:]
+		buf := z.dstPool.Get().([]byte) // Put in .compressBlock
+		// Copy tail from current buffer before handing the buffer over to the
+		// compressBlock goroutine.
+		buf = append(buf[:0], c[len(c)-tailSize:]...)
+		z.prevTail = buf
 	} else {
 		z.prevTail = nil
 	}
-	z.currentBuffer = z.dstPool.Get().([]byte)
+	go z.compressBlock(c, tail, r, z.closed)
+
+	z.currentBuffer = z.dstPool.Get().([]byte) // Put in .compressBlock
 	z.currentBuffer = z.currentBuffer[:0]
 
 	// Wait if flushing
@@ -358,29 +359,37 @@ func (z *Writer) Write(p []byte) (int, error) {
 		// Start receiving data from compressors
 		go func() {
 			listen := z.results
+			var failed bool
 			for {
 				r, ok := <-listen
 				// If closed, we are finished.
 				if !ok {
 					return
 				}
+				if failed {
+					close(r.notifyWritten)
+					continue
+				}
 				buf := <-r.result
 				n, err := z.w.Write(buf)
 				if err != nil {
 					z.pushError(err)
 					close(r.notifyWritten)
-					return
+					failed = true
+					continue
 				}
 				if n != len(buf) {
 					z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf)))
+					failed = true
 					close(r.notifyWritten)
-					return
+					continue
 				}
 				z.dstPool.Put(buf)
 				close(r.notifyWritten)
 			}
 		}()
-		z.currentBuffer = make([]byte, 0, z.blockSize)
+		z.currentBuffer = z.dstPool.Get().([]byte)
+		z.currentBuffer = z.currentBuffer[:0]
 	}
 	q := p
 	for len(q) > 0 {
@@ -390,7 +399,10 @@ func (z *Writer) Write(p []byte) (int, error) {
 		}
 		z.digest.Write(q[:length])
 		z.currentBuffer = append(z.currentBuffer, q[:length]...)
-		if len(z.currentBuffer) >= z.blockSize {
+		if len(z.currentBuffer) > z.blockSize {
+			panic("z.currentBuffer too large (most likely due to concurrent Write race)")
+		}
+		if len(z.currentBuffer) == z.blockSize {
 			z.compressCurrent(false)
 			if err := z.checkError(); err != nil {
 				return len(p) - len(q) - length, err
@@ -410,12 +422,13 @@ func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) {
 		close(r.result)
 		z.wg.Done()
 	}()
-	buf := z.dstPool.Get().([]byte)
+	buf := z.dstPool.Get().([]byte) // Corresponding Put in .Write's result writer
 	dest := bytes.NewBuffer(buf[:0])
 
-	compressor := z.dictFlatePool.Get().(*flate.Writer)
+	compressor := z.dictFlatePool.Get().(*flate.Writer) // Put below
 	compressor.ResetDict(dest, prevTail)
 	compressor.Write(p)
+	z.dstPool.Put(p) // Corresponding Get in .Write and .compressCurrent
 
 	err := compressor.Flush()
 	if err != nil {
@@ -429,7 +442,12 @@ func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) {
 			return
 		}
 	}
-	z.dictFlatePool.Put(compressor)
+	z.dictFlatePool.Put(compressor) // Get above
+
+	if prevTail != nil {
+		z.dstPool.Put(prevTail) // Get in .compressCurrent
+	}
+
 	// Read back buffer
 	buf = dest.Bytes()
 	r.result <- buf
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4ef5d4cc6..2b93c89f2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -68,13 +68,16 @@ github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
 github.com/containers/buildah/docker
 github.com/containers/buildah/imagebuildah
+github.com/containers/buildah/manifests
 github.com/containers/buildah/pkg/blobcache
 github.com/containers/buildah/pkg/chrootuser
 github.com/containers/buildah/pkg/cli
 github.com/containers/buildah/pkg/formats
+github.com/containers/buildah/pkg/manifests
 github.com/containers/buildah/pkg/overlay
 github.com/containers/buildah/pkg/parse
 github.com/containers/buildah/pkg/secrets
+github.com/containers/buildah/pkg/supplemented
 github.com/containers/buildah/pkg/umask
 github.com/containers/buildah/util
 # github.com/containers/common v0.5.0
@@ -142,7 +145,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.16.3
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -313,14 +316,14 @@ github.com/inconshreveable/mousetrap
 github.com/ishidawataru/sctp
 # github.com/json-iterator/go v1.1.9
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.2
+# github.com/klauspost/compress v1.10.3
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/pgzip v1.2.1
+# github.com/klauspost/pgzip v1.2.2
 github.com/klauspost/pgzip
 # github.com/konsorten/go-windows-terminal-sequences v1.0.2
 github.com/konsorten/go-windows-terminal-sequences
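On the zstd side, the new level wired up in encoder_options.go above is selected through the existing options API. A minimal sketch against the vendored v1.10.3; the payload is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when only EncodeAll is used; initialization is
	// deferred until first use via e.init.Do(e.initialize), as patched above.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	data := []byte("example payload, example payload, example payload")
	compressed := enc.EncodeAll(data, nil)
	fmt.Printf("compressed %d bytes to %d bytes\n", len(data), len(compressed))
}
```

Per the switch added in EncoderLevelFromZstd, zstd CLI levels 6 and above now map to this same option.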