75 files changed, 1866 insertions, 753 deletions
@@ -177,8 +177,8 @@ familiar container cli commands. For more details, see the [Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools). ## Podman Legacy API (Varlink) -Podman offers a Varlink-based API for remote management of containers. -However, this API has been deprecated by the REST API. +Podman offers a [Varlink-based API](https://github.com/containers/podman/blob/master/docs/tutorials/varlink_remote_client.md) +for remote management of containers. However, this API has been deprecated by the REST API. Varlink support is in maintenance mode, and will be removed in a future release. For more details, you can see [this blog](https://podman.io/blogs/2020/01/17/podman-new-api.html). diff --git a/cmd/podman/manifest/add.go b/cmd/podman/manifest/add.go index ca633263d..128bf66a7 100644 --- a/cmd/podman/manifest/add.go +++ b/cmd/podman/manifest/add.go @@ -4,14 +4,26 @@ import ( "context" "fmt" + "github.com/containers/common/pkg/auth" + "github.com/containers/image/v5/types" "github.com/containers/podman/v2/cmd/podman/registry" "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/util" "github.com/pkg/errors" "github.com/spf13/cobra" ) +// manifestAddOptsWrapper wraps entities.ManifestAddOptions and prevents leaking +// CLI-only fields into the API types. +type manifestAddOptsWrapper struct { + entities.ManifestAddOptions + + TLSVerifyCLI bool // CLI only + CredentialsCLI string +} + var ( - manifestAddOpts = entities.ManifestAddOptions{} + manifestAddOpts = manifestAddOptsWrapper{} addCmd = &cobra.Command{ Use: "add [flags] LIST LIST", Short: "Add images to a manifest list or image index", @@ -33,15 +45,48 @@ func init() { flags.BoolVar(&manifestAddOpts.All, "all", false, "add all of the list's images if the image is a list") flags.StringSliceVar(&manifestAddOpts.Annotation, "annotation", nil, "set an `annotation` for the specified image") flags.StringVar(&manifestAddOpts.Arch, "arch", "", "override the `architecture` of the specified image") + flags.StringVar(&manifestAddOpts.Authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. 
Use REGISTRY_AUTH_FILE environment variable to override") + flags.StringVar(&manifestAddOpts.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") + flags.StringVar(&manifestAddOpts.CredentialsCLI, "creds", "", "use `[username[:password]]` for accessing the registry") + flags.StringSliceVar(&manifestAddOpts.Features, "features", nil, "override the `features` of the specified image") flags.StringVar(&manifestAddOpts.OS, "os", "", "override the `OS` of the specified image") flags.StringVar(&manifestAddOpts.OSVersion, "os-version", "", "override the OS `version` of the specified image") + flags.BoolVar(&manifestAddOpts.TLSVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") flags.StringVar(&manifestAddOpts.Variant, "variant", "", "override the `Variant` of the specified image") + + if registry.IsRemote() { + _ = flags.MarkHidden("authfile") + _ = flags.MarkHidden("cert-dir") + _ = flags.MarkHidden("tls-verify") + } } func add(cmd *cobra.Command, args []string) error { + if err := auth.CheckAuthFile(manifestPushOpts.Authfile); err != nil { + return err + } + manifestAddOpts.Images = []string{args[1], args[0]} - listID, err := registry.ImageEngine().ManifestAdd(context.Background(), manifestAddOpts) + + if manifestAddOpts.CredentialsCLI != "" { + creds, err := util.ParseRegistryCreds(manifestAddOpts.CredentialsCLI) + if err != nil { + return err + } + manifestAddOpts.Username = creds.Username + manifestAddOpts.Password = creds.Password + } + + // TLS verification in c/image is controlled via a `types.OptionalBool` + // which allows for distinguishing among set-true, set-false, unspecified + // which is important to implement a sane way of dealing with defaults of + // boolean CLI flags. 
+ if cmd.Flags().Changed("tls-verify") { + manifestAddOpts.SkipTLSVerify = types.NewOptionalBool(!manifestAddOpts.TLSVerifyCLI) + } + + listID, err := registry.ImageEngine().ManifestAdd(context.Background(), manifestAddOpts.ManifestAddOptions) if err != nil { return errors.Wrapf(err, "error adding to manifest list %s", args[0]) } diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go index dabf6f0d2..68a577ae1 100644 --- a/cmd/podman/networks/create.go +++ b/cmd/podman/networks/create.go @@ -21,9 +21,6 @@ var ( RunE: networkCreate, Args: cobra.MaximumNArgs(1), Example: `podman network create podman1`, - Annotations: map[string]string{ - registry.ParentNSRequired: "", - }, } ) diff --git a/cmd/podman/networks/inspect.go b/cmd/podman/networks/inspect.go index f00d6b63c..c5872def7 100644 --- a/cmd/podman/networks/inspect.go +++ b/cmd/podman/networks/inspect.go @@ -22,9 +22,6 @@ var ( RunE: networkInspect, Example: `podman network inspect podman`, Args: cobra.MinimumNArgs(1), - Annotations: map[string]string{ - registry.ParentNSRequired: "", - }, } ) diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go index 3a2651cbc..b6fb2bb80 100644 --- a/cmd/podman/networks/list.go +++ b/cmd/podman/networks/list.go @@ -25,9 +25,6 @@ var ( Long: networklistDescription, RunE: networkList, Example: `podman network list`, - Annotations: map[string]string{ - registry.ParentNSRequired: "", - }, } ) diff --git a/cmd/podman/networks/rm.go b/cmd/podman/networks/rm.go index dfbb5d081..ac49993b7 100644 --- a/cmd/podman/networks/rm.go +++ b/cmd/podman/networks/rm.go @@ -19,9 +19,6 @@ var ( RunE: networkRm, Example: `podman network rm podman`, Args: cobra.MinimumNArgs(1), - Annotations: map[string]string{ - registry.ParentNSRequired: "", - }, } ) diff --git a/completions/bash/podman b/completions/bash/podman index e250f344b..e8185235b 100644 --- a/completions/bash/podman +++ b/completions/bash/podman @@ -1846,6 +1846,9 @@ _podman_manifest() { _podman_manifest_add() { local options_with_args=" --annotation + --authfile + --cert-dir + --creds --arch --features --os @@ -1857,6 +1860,7 @@ _podman_manifest_add() { --all --help -h + --tls-verify " _complete_ "$options_with_args" "$boolean_options" diff --git a/contrib/rootless-cni-infra/Containerfile b/contrib/rootless-cni-infra/Containerfile new file mode 100644 index 000000000..c5d812a6e --- /dev/null +++ b/contrib/rootless-cni-infra/Containerfile @@ -0,0 +1,35 @@ +ARG GOLANG_VERSION=1.15 +ARG ALPINE_VERSION=3.12 +ARG CNI_VERSION=v0.8.0 +ARG CNI_PLUGINS_VERSION=v0.8.7 +# Aug 20, 2020 +ARG DNSNAME_VESION=78b4da7bbfc51c27366da630e1df1c4f2e8b1b5b + +FROM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION} AS golang-base +RUN apk add --no-cache git + +FROM golang-base AS cnitool +RUN git clone https://github.com/containernetworking/cni /go/src/github.com/containernetworking/cni +WORKDIR /go/src/github.com/containernetworking/cni +ARG CNI_VERSION +RUN git checkout ${CNI_VERSION} +RUN go build -o /cnitool ./cnitool + +FROM golang-base AS dnsname +RUN git clone https://github.com/containers/dnsname /go/src/github.com/containers/dnsname +WORKDIR /go/src/github.com/containers/dnsname +ARG DNSNAME_VERSION +RUN git checkout ${DNSNAME_VERSION} +RUN go build -o /dnsname ./plugins/meta/dnsname + +FROM alpine:${ALPINE_VERSION} +RUN apk add --no-cache curl dnsmasq iptables ip6tables iproute2 +ARG TARGETARCH +ARG CNI_PLUGINS_VERSION +RUN mkdir -p /opt/cni/bin && \ + curl -fsSL 
https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${TARGETARCH}-${CNI_PLUGINS_VERSION}.tgz | tar xz -C /opt/cni/bin +COPY --from=cnitool /cnitool /usr/local/bin +COPY --from=dnsname /dnsname /opt/cni/bin +COPY rootless-cni-infra /usr/local/bin +ENV CNI_PATH=/opt/cni/bin +CMD ["sleep", "infinity"] diff --git a/contrib/rootless-cni-infra/README.md b/contrib/rootless-cni-infra/README.md new file mode 100644 index 000000000..937e057fb --- /dev/null +++ b/contrib/rootless-cni-infra/README.md @@ -0,0 +1,22 @@ +# rootless-cni-infra + +Infra container for CNI-in-slirp4netns. + +## How it works + +When a CNI network is specified for `podman run` in rootless mode, Podman launches the `rootless-cni-infra` container to execute CNI plugins inside slirp4netns. + +The infra container is created per user, by executing an equivalent of: +`podman run -d --name rootless-cni-infra --pid=host --privileged -v $HOME/.config/cni/net.d:/etc/cni/net.d rootless-cni-infra`. +The infra container is automatically deleted when no CNI network is in use. + +Podman then allocates a CNI netns in the infra container, by executing an equivalent of: +`podman exec rootless-cni-infra rootless-cni-infra alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME`. + +The allocated netns is deallocated when the container is being removed, by executing an equivalent of: +`podman exec rootless-cni-infra rootless-cni-infra dealloc $CONTAINER_ID $NETWORK_NAME`. + +## Directory layout + +* `/run/rootless-cni-infra/${CONTAINER_ID}/pid`: PID of the `sleep infinity` process that corresponds to the allocated netns +* `/run/rootless-cni-infra/${CONTAINER_ID}/attached/${NETWORK_NAME}`: CNI result diff --git a/contrib/rootless-cni-infra/rootless-cni-infra b/contrib/rootless-cni-infra/rootless-cni-infra new file mode 100755 index 000000000..5a574d2eb --- /dev/null +++ b/contrib/rootless-cni-infra/rootless-cni-infra @@ -0,0 +1,147 @@ +#!/bin/sh +set -eu + +ARG0="$0" +VERSION="0.1.0" +BASE="/run/rootless-cni-infra" + +# CLI subcommand: "alloc $CONTAINER_ID $NETWORK_NAME $POD_NAME" +cmd_entrypoint_alloc() { + if [ "$#" -ne 3 ]; then + echo >&2 "Usage: $ARG0 alloc CONTAINER_ID NETWORK_NAME POD_NAME" + exit 1 + fi + + ID="$1" + NET="$2" + K8S_POD_NAME="$3" + + dir="${BASE}/${ID}" + mkdir -p "${dir}/attached" + + pid="" + if [ -f "${dir}/pid" ]; then + pid=$(cat "${dir}/pid") + else + unshare -n sleep infinity & + pid="$!" + echo "${pid}" >"${dir}/pid" + nsenter -t "${pid}" -n ip link set lo up + fi + CNI_ARGS="IgnoreUnknown=1;K8S_POD_NAME=${K8S_POD_NAME}" + nwcount=$(find "${dir}/attached" -type f | wc -l) + CNI_IFNAME="eth${nwcount}" + export CNI_ARGS CNI_IFNAME + cnitool add "${NET}" "/proc/${pid}/ns/net" >"${dir}/attached/${NET}" + + # return the result + ns="/proc/${pid}/ns/net" + echo "{\"ns\":\"${ns}\"}" +} + +# CLI subcommand: "dealloc $CONTAINER_ID $NETWORK_NAME" +cmd_entrypoint_dealloc() { + if [ "$#" -ne 2 ]; then + echo >&2 "Usage: $ARG0 dealloc CONTAINER_ID NETWORK_NAME" + exit 1 + fi + + ID=$1 + NET=$2 + + dir="${BASE}/${ID}" + if [ ! -f "${dir}/pid" ]; then + exit 0 + fi + pid=$(cat "${dir}/pid") + cnitool del "${NET}" "/proc/${pid}/ns/net" + rm -f "${dir}/attached/${NET}" + + nwcount=$(find "${dir}/attached" -type f | wc -l) + if [ "${nwcount}" = 0 ]; then + kill -9 "${pid}" + rm -rf "${dir}" + fi + + # return empty json + echo "{}" +} + +# CLI subcommand: "is-idle" +cmd_entrypoint_is_idle() { + if [ ! 
-d ${BASE} ]; then + echo '{"idle": true}' + elif [ -z "$(ls -1 ${BASE})" ]; then + echo '{"idle": true}' + else + echo '{"idle": false}' + fi +} + +# CLI subcommand: "print-cni-result $CONTAINER_ID $NETWORK_NAME" +cmd_entrypoint_print_cni_result() { + if [ "$#" -ne 2 ]; then + echo >&2 "Usage: $ARG0 print-cni-result CONTAINER_ID NETWORK_NAME" + exit 1 + fi + + ID=$1 + NET=$2 + + # the result shall be CNI JSON + cat "${BASE}/${ID}/attached/${NET}" +} + +# CLI subcommand: "print-netns-path $CONTAINER_ID" +cmd_entrypoint_print_netns_path() { + if [ "$#" -ne 1 ]; then + echo >&2 "Usage: $ARG0 print-netns-path CONTAINER_ID" + exit 1 + fi + + ID=$1 + + pid=$(cat "${BASE}/${ID}/pid") + path="/proc/${pid}/ns/net" + + # return the result + echo "{\"path\":\"${path}\"}" +} + +# CLI subcommand: "help" +cmd_entrypoint_help() { + echo "Usage: ${ARG0} COMMAND" + echo + echo "Rootless CNI Infra container" + echo + echo "Commands:" + echo " alloc Allocate a netns" + echo " dealloc Deallocate a netns" + echo " is-idle Print whether the infra container is idle" + echo " print-cni-result Print CNI result" + echo " print-netns-path Print netns path" + echo " help Print help" + echo " version Print version" +} + +# CLI subcommand: "version" +cmd_entrypoint_version() { + echo "{\"version\": \"${VERSION}\"}" +} + +# parse args +command="${1:-}" +if [ -z "$command" ]; then + echo >&2 "No command was specified. Run \`${ARG0} help\` to see the usage." + exit 1 +fi + +command_func=$(echo "cmd_entrypoint_${command}" | sed -e "s/-/_/g") +if ! command -v "${command_func}" >/dev/null 2>&1; then + echo >&2 "Unknown command: ${command}. Run \`${ARG0} help\` to see the usage." + exit 1 +fi + +# start the command func +shift +"${command_func}" "$@" diff --git a/docs/source/Tutorials.rst b/docs/source/Tutorials.rst index 33e4ae3d3..83818e3ae 100644 --- a/docs/source/Tutorials.rst +++ b/docs/source/Tutorials.rst @@ -6,7 +6,7 @@ Here are a number of useful tutorials to get you up and running with Podman. If * `Basic Setup and Use of Podman <https://github.com/containers/podman/blob/master/docs/tutorials/podman_tutorial.md>`_: Learn how to setup Podman and perform some basic commands with the utility. * `Basic Setup and Use of Podman in a Rootless environment <https://github.com/containers/podman/blob/master/docs/tutorials/rootless_tutorial.md>`_: The steps required to setup rootless Podman are enumerated. -* `Podman Mac Client tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/mac_client.md>`_: Special setup for running the Podman remote client on a Mac and connecting to Podman running on a Linux VM are documented. +* `Podman Mac/Windows tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md>`_: Special setup for running the Podman remote client on a Mac or Windows PC and connecting to Podman running on a Linux VM are documented. * `How to sign and distribute container images using Podman <https://github.com/containers/podman/blob/master/docs/tutorials/image_signing.md>`_: Learn how to setup and use image signing with Podman. * `Podman remote-client tutorial <https://github.com/containers/podman/blob/master/docs/tutorials/remote_client.md>`_: A brief how-to on using the Podman remote-client. * `How to use libpod for custom/derivative projects <https://github.com/containers/podman/blob/master/docs/tutorials/podman-derivative-api.md>`_: How the libpod API can be used within your own project. 
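The `rootless-cni-infra` script above answers every subcommand with a small JSON document, which is what libpod parses later in this change (see `libpod/rootless_cni_linux.go` below). Assuming the infra container is running under the name given in its README, a quick probe of that protocol looks roughly like:

```
$ podman exec rootless-cni-infra rootless-cni-infra version
{"version": "0.1.0"}
$ podman exec rootless-cni-infra rootless-cni-infra is-idle
{"idle": true}
```

Both outputs come straight from the script's `cmd_entrypoint_version` and `cmd_entrypoint_is_idle` functions; `alloc`, `dealloc`, and `print-netns-path` reply in the same JSON-over-`podman exec` style.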
diff --git a/docs/source/markdown/podman-manifest-add.1.md b/docs/source/markdown/podman-manifest-add.1.md index 44815def5..c4d4417c4 100644 --- a/docs/source/markdown/podman-manifest-add.1.md +++ b/docs/source/markdown/podman-manifest-add.1.md @@ -33,6 +33,25 @@ the image. If *imageName* refers to a manifest list or image index, the architecture information will be retrieved from it. Otherwise, it will be retrieved from the image's configuration information. +**--authfile**=*path* + +Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`. +If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. (Not available for remote commands) + +Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE +environment variable. `export REGISTRY_AUTH_FILE=path` + +**--cert-dir**=*path* + +Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry. +Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands) + +**--creds**=*creds* + +The [username[:password]] to use to authenticate with the registry if required. +If one or both values are not supplied, a command line prompt will appear and the +value can be entered. The password is entered without echo. + **--features** Specify the features list which the list or index records as requirements for @@ -50,6 +69,10 @@ configuration information. Specify the OS version which the list or index records as a requirement for the image. This option is rarely used. +**--tls-verify** + +Require HTTPS and verify certificates when talking to container registries (defaults to true). (Not available for remote commands) + **--variant** Specify the variant which the list or index records for the image. This option diff --git a/docs/tutorials/README.md b/docs/tutorials/README.md index 4beb069ac..7f7b4853d 100644 --- a/docs/tutorials/README.md +++ b/docs/tutorials/README.md @@ -12,9 +12,9 @@ Learn how to setup Podman and perform some basic commands with the utility. The steps required to setup rootless Podman are enumerated. -**[Setup on OS X](mac_client.md)** +**[Setup Mac/Windows](mac_win_client.md) -Special setup for running the Podman remote client on a Mac and connecting to Podman running on a Linux VM are documented. +Special setup for running the Podman remote client on a Mac or Windows PC and connecting to Podman running on a Linux VM are documented. **[Remote Client](remote_client.md)** diff --git a/docs/tutorials/mac_client.md b/docs/tutorials/mac_client.md index f6c9160a8..f406ca54d 100644 --- a/docs/tutorials/mac_client.md +++ b/docs/tutorials/mac_client.md @@ -1,99 +1,2 @@ -# Podman Mac Client tutorial - -## What is the Podman Mac Client - -First and foremost, the Mac Client is under heavy development. We are working on getting the -Mac client to be packaged and run for a native-like experience. This is the setup tutorial -for the Mac client at its current stage of development and packaging. - -The purpose of the Mac client for Podman is to allow users to run Podman on a Mac. Since Podman is a Linux -container engine, The Mac client is actually a version of the [Podman-remote client](remote_client.md), -edited to that the client side works on a Mac machine, and connects to a Podman "backend" on a Linux -machine, virtual or physical. 
The goal is to have a native-like experience when working with the Mac -client, so the command line interface of the remote client is exactly the same as the regular Podman -commands with the exception of some flags and commands that do not apply to the Mac client. - -## What you need - -To use the Mac client, you will need a binary built for MacOS and a Podman "backend" on a Linux machine; -hereafter referred to as the Podman node. In this context, a Podman node is a Linux system with Podman -installed on it and the varlink service activated. You will also need to be able to ssh into this -system as a user with privileges to the varlink socket (more on this later). - -For best results, use the most recent version of MacOS - -## Getting the Mac client -The Mac client is available through [Homebrew](https://brew.sh/). -``` -$ brew cask install podman -``` - -## Setting up the client and Podman node connection - -To use the Mac client, you must perform some setup on both the Mac and Podman nodes. In this case, -the Mac node refers to the Mac on which Podman is being run; and the Podman node refers to where -Podman and its storage reside. - -### Connection settings -Your Linux box must have ssh enabled, and you must copy your Mac's public key from `~/.sconf sh/id.pub` to -`/root/.ssh/authorized_keys` on your Linux box using `ssh-copy-id` This allows for the use of SSH keys -for remote access. - -You may need to edit your `/etc/ssh/sshd_config` in your Linux machine as follows: -``` -PermitRootLogin yes -``` - -Use of SSH keys are strongly encouraged to ensure a secure login. However, if you wish to avoid ‘logging in’ every -time you run a Podman command, you may edit your `/etc/ssh/sshd_config` on your Linux machine as follows: -``` -PasswordAuthentication no -PermitRootLogin without-password -``` - -### Podman node setup -The Podman node must be running a Linux distribution that supports Podman and must have Podman (not the Mac -client) installed. You must also have root access to the node. Check if your system uses systemd: -``` -$cat /proc/1/comm -systemd -``` -If it does, then simply start the Podman varlink socket: -``` -$ sudo systemctl start io.podman.socket -$ sudo systemctl enable io.podman.socket -``` - -If your system cannot use systemd, then you can manually establish the varlink socket with the Podman -command: -``` -$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman -``` - -### Required permissions -For now, the Mac client requires that you be able to run a privileged Podman and have privileged ssh -access to the remote system. This limitation is being worked on. - -#### Running the remote client -There are three different ways to pass connection information into the client: flags, conf file, and -environment variables. All three require information on username and a remote host ip address. Most often, -your username should be root and you can obtain your remote-host-ip using `ip addr` - -To connect using flags, you can use -``` -$ podman --remote-host remote-host-ip --username root images -REPOSITORY TAG IMAGE ID CREATED SIZE -quay.io/podman/stable latest 9c1e323be87f 10 days ago 414 MB -localhost/test latest 4b8c27c343e1 4 weeks ago 253 MB -k8s.gcr.io/pause 3.1 da86e6ba6ca1 20 months ago 747 kB -``` -If the conf file is set up, you may simply use Podman as you would on the linux machine. 
Take a look at -[podman-remote.conf.5.md](https://github.com/containers/podman/blob/master/docs/podman-remote.conf.5.md) on how to use the conf file: - -``` -$ podman images -REPOSITORY TAG IMAGE ID CREATED SIZE -quay.io/podman/stable latest 9c1e323be87f 10 days ago 414 MB -localhost/test latest 4b8c27c343e1 4 weeks ago 253 MB -k8s.gcr.io/pause 3.1 da86e6ba6ca1 20 months ago 747 kB -``` +# [Podman Mac Client tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) +This tutorial has moved! You can find out how to set up Podman on MacOS (as well as Windows) [here](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) diff --git a/docs/tutorials/mac_win_client.md b/docs/tutorials/mac_win_client.md new file mode 100644 index 000000000..63830a5b1 --- /dev/null +++ b/docs/tutorials/mac_win_client.md @@ -0,0 +1,111 @@ +# Podman Remote clients for MacOS and Windows + +## Introduction + +The core Podman runtime environment can only run on Linux operating systems. But other operating systems can use the “remote client” to manage their containers to a Linux backend. This remote client is nearly identical to the standard Podman program. Certain functions that do not make sense for remote clients have been removed. For example, the “--latest” switch for container commands has been removed. + +### Brief architecture + +The remote client uses a client-server model. You need Podman installed on a Linux machine or VM that also has the SSH daemon running. On the local operating system, when you execute a Podman command, Podman connects to the server via SSH. It then connects to the Podman service by using systemd socket activation. The Podman commands are executed on the server. From the client's point of view, it seems like Podman runs locally. + +## Obtaining and installing Podman + +### Windows + +Installing the Windows Podman client begins by downloading the Podman windows installer. The windows installer is built with each Podman release and is downloadable from its [release description page](https://github.com/containers/podman/releases/latest). You can also build the installer from source using the `podman.msi` Makefile endpoint. + +Once you have downloaded the installer, simply double click the installer and Podman will be installed. The path is also set to put `podman` in the default user path. + +Podman must be run at a command prompt using the Windows ‘cmd” or powershell applications. + +### MacOS + +The Mac Client is available through [Homebrew](https://brew.sh/). You can download homebrew via the instructions on their site. Install podman using: +``` +$ brew install podman +``` + +## Creating the first connection + +### Enable the Podman service on the server machine. + +Before performing any Podman client commands, you must enable the podman.sock SystemD service on the Linux server. In these examples, we are running Podman as a normal, unprivileged user, also known as a rootless user. By default, the rootless socket listens at `/run/user/${UID}/podman/podman.sock`. You can enable this socket, permanently using the following command: +``` +$ systemctl --user enable podman.socket +``` +You will need to enable linger for this user in order for the socket to work when the user is not logged in. + +``` +$ sudo loginctl enable-linger $USER +``` + +You can verify that the socket is listening with a simple Podman command. 
+ +``` +$ podman --remote info +host: + arch: amd64 + buildahVersion: 1.16.0-dev + cgroupVersion: v2 + conmon: + package: conmon-2.0.19-1.fc32.x86_64 +``` + +#### Enable sshd + +In order for the client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled. +``` +$ sudo systemctl enable -s sshd +``` + +#### Setting up SSH +Remote podman uses SSH to communicate between the client and server. The remote client works considerably smoother using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine. +``` +$ ssh-keygen +``` +Your public key by default should be in your home directory under .ssh\id_rsa.pub. You then need to copy the contents of id_rsa.pub and append it into ~/.ssh/authorized_keys on the Linux server. On a Mac, you can automate this using ssh-copy-id. + +If you do not wish to use SSH keys, you will be prompted with each Podman command for your login password. + +## Using the client + +The first step in using the Podman remote client is to configure a connection.. + +You can add a connection by using the `podman system connection add` command. + +``` +C:\Users\baude> podman system connection add baude --identity c:\Users\baude\.ssh\id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock +``` + +This will add a remote connection to Podman and if it is the first connection added, it will mark the connection as the default. You can observe your connections with `podman system connection list` + +``` +C:\Users\baude> podman system connection list +Name Identity URI +baude* id_rsa ssh://baude@192.168.122.1/run/user/1000/podman/podman.sock +``` + +Now we can test the connection with `podman info`. + +``` +C:\Users\baude> podman info +host: + arch: amd64 + buildahVersion: 1.16.0-dev + cgroupVersion: v2 + conmon: + package: conmon-2.0.19-1.fc32.x86_64 +``` + +Podman has also introduced a “--connection” flag where you can use other connections you have defined. If no connection is provided, the default connection will be used. + +``` +C:\Users\baude> podman system connection --help +``` + +## Wrap up + +You can use the podman remote clients to manage your containers running on a Linux server. The communication between client and server relies heavily on SSH connections and the use of SSH keys are encouraged. Once you have Podman installed on your remote client, you should set up a connection using `podman system connection add` which will then be used by subsequent Podman commands. + +## History +Originally published on [Red Hat Enable Sysadmin](https://www.redhat.com/sysadmin/podman-clients-macos-windows) diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md index 97268fc41..85b95af04 100644 --- a/docs/tutorials/podman_tutorial.md +++ b/docs/tutorials/podman_tutorial.md @@ -5,7 +5,7 @@ Podman is a utility provided as part of the libpod library. It can be used to c containers. The following tutorial will teach you how to set up Podman and perform some basic commands with Podman. -If you are running on a Mac, you should instead follow the [Mac tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_client.md) +If you are running on a Mac or Windows PC, you should instead follow the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) to set up the remote Podman client. 
**NOTE**: the code samples are intended to be run as a non-root user, and use `sudo` where diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md index d4c43dda2..ad506d19a 100644 --- a/docs/tutorials/remote_client.md +++ b/docs/tutorials/remote_client.md @@ -1,88 +1,112 @@ # Podman remote-client tutorial -## What is the remote-client +## Introduction +The purpose of the Podman remote-client is to allow users to interact with a Podman "backend" while on a separate client. The command line interface of the remote client is exactly the same as the regular Podman commands with the exception of some flags being removed as they do not apply to the remote-client. -First and foremost, the remote-client is under heavy development. We are adding new -commands and functions frequently. We also are working on a rootless implementation that -does not require privileged users. +The remote client takes advantage of a client-server model. You need Podman installed on a Linux machine or VM that also has the SSH daemon running. On the local operating system, when you execute a Podman command, Podman connects to the server via SSH. It then connects to the Podman service by using systemd socket activation, and hitting our [Rest API](https://docs.podman.io/en/latest/_static/api.html). The Podman commands are executed on the server. From the client's point of view, it seems like Podman runs locally. -The purpose of the Podman remote-client is to allow users to interact with a Podman "backend" -while on a separate client. The command line interface of the remote client is exactly the -same as the regular Podman commands with the exception of some flags being removed as they -do not apply to the remote-client. +This tutorial is for running Podman remotely on Linux. If you are using a Mac or a Windows PC, please follow the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) -## What you need -To use the remote-client, you will need a binary for your client and a Podman "backend"; hereafter -referred to as the Podman node. In this context, a Podman node is a Linux system with Podman -installed on it and the varlink service activated. You will also need to be able to ssh into this -system as a user with privileges to the varlink socket (more on this later). +## Obtaining and installing Podman -## Building the remote client -At this time, the Podman remote-client is not being packaged for any distribution. It must be built from -source. To set up your build environment, see [Installation notes](https://github.com/containers/podman/blob/master/install.md) and follow the -section [Building from scratch](https://github.com/containers/podman/blob/master/install.md#building-from-scratch). Once you can successfully -build the regular Podman binary, you can now build the remote-client. +### Client machine +You will need either Podman or the podman-remote client. The difference between the two is that the compiled podman-remote client can only act as a remote client connecting to a backend, while Podman can run local, standard Podman commands, as well as act as a remote client (using `podman --remote`) + +If you already have Podman installed, you do not need to install podman-remote. 
+ +You can find out how to [install Podman here](https://podman.io/getting-started/installation) + +If you would like to install only the podman-remote client, it is downloadable from its [release description page](https://github.com/containers/podman/releases/latest). You can also build it from source using the `make podman-remote` + + +### Server Machine +You will need to [install Podman](https://podman.io/getting-started/installation) on your server machine. + + +## Creating the first connection + +### Enable the Podman service on the server machine. + +Before performing any Podman client commands, you must enable the podman.sock SystemD service on the Linux server. In these examples, we are running Podman as a normal, unprivileged user, also known as a rootless user. By default, the rootless socket listens at `/run/user/${UID}/podman/podman.sock`. You can enable this socket permanently using the following command: ``` -$ make podman-remote +$ systemctl --user enable podman.socket ``` -Like building the regular Podman, the resulting binary will be in the *bin* directory. This is the binary -you will run on the remote node later in the instructions. +You will need to enable linger for this user in order for the socket to work when the user is not logged in: -## Setting up the remote and Podman nodes +``` +$ sudo loginctl enable-linger $USER +``` +This is only required if you are not running Podman as root. -To use the remote-client, you must perform some setup on both the remote and Podman nodes. In this case, -the remote node refers to where the remote-client is being run; and the Podman node refers to where -Podman and its storage reside. +You can verify that the socket is listening with a simple Podman command. +``` +$ podman --remote info +host: + arch: amd64 + buildahVersion: 1.16.0-dev + cgroupVersion: v2 + conmon: + package: conmon-2.0.19-1.fc32.x86_64 +``` -### Podman node setup +#### Enable sshd -Varlink bridge support is provided by the varlink cli command and installed using: +In order for the Podman client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled. ``` -$ sudo dnf install varlink-cli +$ sudo systemctl enable -s sshd ``` -The Podman node must have Podman (not the remote-client) installed as normal. If your system uses systemd, -then simply start the Podman varlink socket. +#### Setting up SSH +Remote Podman uses SSH to communicate between the client and server. The remote client works considerably smoother using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine. ``` -$ sudo systemctl start io.podman.socket +$ ssh-keygen ``` +Your public key by default should be in your home directory under ~/.ssh/id_rsa.pub. You then need to copy the contents of id_rsa.pub and append it into ~/.ssh/authorized_keys on the Linux server. You can automate this using ssh-copy-id. + +If you do not wish to use SSH keys, you will be prompted with each Podman command for your login password. + +## Using the client + +Note: `podman-remote` is equivalent to `podman --remote` here, depending on what you have chosen to install. + +The first step in using the Podman remote client is to configure a connection. + +You can add a connection by using the `podman-remote system connection add` command. 
-If your system cannot use systemd, then you can manually establish the varlink socket with the Podman -command: ``` -$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman +$ podman-remote system connection add myuser --identity ~/.ssh/id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock ``` -### Required permissions -For now, the remote-client requires that you be able to run a privileged Podman and have privileged ssh -access to the remote system. This limitation is being worked on. +This will add a remote connection to Podman and if it is the first connection added, it will mark the connection as the default. You can observe your connections with `podman-remote system connection list`: -### Remote node setup - -#### Initiate an ssh session to the Podman node -To use the remote client, an ssh connection to the Podman server must be established. +``` +$ podman-remote system connection list +Name Identity URI +myuser* id_rsa ssh://myuser@192.168.122.1/run/user/1000/podman/podman.sock +``` -Using the varlink bridge, an ssh tunnel must be initiated to connect to the server. Podman must then be informed of the location of the sshd server on the targeted server +Now we can test the connection with `podman info`: ``` -$ export PODMAN_VARLINK_BRIDGE=$'ssh -T -p22 root@remotehost -- "varlink -A \'podman varlink \$VARLINK_ADDRESS\' bridge"' -$ bin/podman-remote images -REPOSITORY TAG IMAGE ID CREATED SIZE -docker.io/library/ubuntu latest 47b19964fb50 2 weeks ago 90.7 MB -docker.io/library/alpine latest caf27325b298 3 weeks ago 5.8 MB -quay.io/cevich/gcloud_centos latest 641dad61989a 5 weeks ago 489 MB -k8s.gcr.io/pause 3.1 da86e6ba6ca1 14 months ago 747 kB +$ podman-remote info +host: + arch: amd64 + buildahVersion: 1.16.0-dev + cgroupVersion: v2 + conmon: + package: conmon-2.0.19-1.fc32.x86_64 ``` -The PODMAN_VARLINK_BRIDGE variable may be added to your log in settings. It does not change per connection. +Podman-remote has also introduced a “--connection” flag where you can use other connections you have defined. If no connection is provided, the default connection will be used. -If coming from a Windows machine, the PODMAN_VARLINK_BRIDGE is formatted as: ``` -set PODMAN_VARLINK_BRIDGE=C:\Windows\System32\OpenSSH\ssh.exe -T -p22 root@remotehost -- varlink -A "podman varlink $VARLINK_ADDRESS" bridge +$ podman-remote system connection --help ``` -The arguments before the `--` are presented to ssh while the arguments after are for the varlink cli. The varlink arguments should be copied verbatim. - - `-p` is the port on the remote host for the ssh tunnel. `22` is the default. - - `root` is the currently supported user, while `remotehost` is the name or IP address of the host providing the Podman service. - - `-i` may be added to select an identity file. +## Wrap up + +You can use the Podman remote clients to manage your containers running on a Linux server. The communication between client and server relies heavily on SSH connections and the use of SSH keys are encouraged. Once you have Podman installed on your remote client, you should set up a connection using `podman-remote system connection add` which will then be used by subsequent Podman commands. 
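The `--connection` flag mentioned a few paragraphs above selects one of the named connections for a single invocation. A short sketch, assuming the `myuser` connection defined earlier:

```
$ podman-remote --connection myuser images
REPOSITORY               TAG      IMAGE ID       CREATED       SIZE
quay.io/podman/stable    latest   9c1e323be87f   10 days ago   414 MB
```

If no `--connection` is given, the default (starred) connection shown by `podman-remote system connection list` is used.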
+ +## History +Adapted from the [Mac and Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) diff --git a/docs/tutorials/varlink_remote_client.md b/docs/tutorials/varlink_remote_client.md new file mode 100644 index 000000000..54c648a48 --- /dev/null +++ b/docs/tutorials/varlink_remote_client.md @@ -0,0 +1,89 @@ +# Podman varlink remote-client tutorial [DEPRECATED] + +## What is the varlink client + +This API has been deprecated by the [REST API](https://docs.podman.io/en/latest/_static/api.html). +For usage on Windows and Mac, please reference the [Podman Mac/Windows tutorial](https://github.com/containers/podman/blob/master/docs/tutorials/mac_win_client.md) +Varlink support is in maintenance mode, and will be removed in a future release. +For more details, you can see [this blog](https://podman.io/blogs/2020/01/17/podman-new-api.html). + +The purpose of the Podman remote-client is to allow users to interact with a Podman "backend" +while on a separate client. The command line interface of the remote client is exactly the +same as the regular Podman commands with the exception of some flags being removed as they +do not apply to the remote-client. + +## What you need +To use the remote-client, you will need a binary for your client and a Podman "backend"; hereafter +referred to as the Podman node. In this context, a Podman node is a Linux system with Podman +installed on it and the varlink service activated. You will also need to be able to ssh into this +system as a user with privileges to the varlink socket (more on this later). + +## Building the remote client +At this time, the Podman remote-client is not being packaged for any distribution. It must be built from +source. To set up your build environment, see [Installation notes](https://github.com/containers/podman/blob/master/install.md) and follow the +section [Building from scratch](https://github.com/containers/podman/blob/master/install.md#building-from-scratch). Once you can successfully +build the regular Podman binary, you can now build the remote-client. +``` +$ make podman-remote +``` +Like building the regular Podman, the resulting binary will be in the *bin* directory. This is the binary +you will run on the remote node later in the instructions. + +## Setting up the remote and Podman nodes + +To use the remote-client, you must perform some setup on both the remote and Podman nodes. In this case, +the remote node refers to where the remote-client is being run; and the Podman node refers to where +Podman and its storage reside. + + +### Podman node setup + +Varlink bridge support is provided by the varlink cli command and installed using: +``` +$ sudo dnf install varlink-cli +``` + +The Podman node must have Podman (not the remote-client) installed as normal. If your system uses systemd, +then simply start the Podman varlink socket. +``` +$ sudo systemctl start io.podman.socket +``` + +If your system cannot use systemd, then you can manually establish the varlink socket with the Podman +command: +``` +$ sudo podman --log-level debug varlink --timeout 0 unix://run/podman/io.podman +``` + +### Required permissions +For now, the remote-client requires that you be able to run a privileged Podman and have privileged ssh +access to the remote system. This limitation is being worked on. + +### Remote node setup + +#### Initiate an ssh session to the Podman node +To use the remote client, an ssh connection to the Podman server must be established. 
+ +Using the varlink bridge, an ssh tunnel must be initiated to connect to the server. Podman must then be informed of the location of the sshd server on the targeted server + +``` +$ export PODMAN_VARLINK_BRIDGE=$'ssh -T -p22 root@remotehost -- "varlink -A \'podman varlink \$VARLINK_ADDRESS\' bridge"' +$ bin/podman-remote images +REPOSITORY TAG IMAGE ID CREATED SIZE +docker.io/library/ubuntu latest 47b19964fb50 2 weeks ago 90.7 MB +docker.io/library/alpine latest caf27325b298 3 weeks ago 5.8 MB +quay.io/cevich/gcloud_centos latest 641dad61989a 5 weeks ago 489 MB +k8s.gcr.io/pause 3.1 da86e6ba6ca1 14 months ago 747 kB +``` + +The PODMAN_VARLINK_BRIDGE variable may be added to your log in settings. It does not change per connection. + +If coming from a Windows machine, the PODMAN_VARLINK_BRIDGE is formatted as: +``` +set PODMAN_VARLINK_BRIDGE=C:\Windows\System32\OpenSSH\ssh.exe -T -p22 root@remotehost -- varlink -A "podman varlink $VARLINK_ADDRESS" bridge +``` + +The arguments before the `--` are presented to ssh while the arguments after are for the varlink cli. The varlink arguments should be copied verbatim. + - `-p` is the port on the remote host for the ssh tunnel. `22` is the default. + - `root` is the currently supported user, while `remotehost` is the name or IP address of the host providing the Podman service. + - `-i` may be added to select an identity file. @@ -15,7 +15,7 @@ require ( github.com/containers/conmon v2.0.20+incompatible github.com/containers/image/v5 v5.5.2 github.com/containers/psgo v1.5.1 - github.com/containers/storage v1.23.4 + github.com/containers/storage v1.23.5 github.com/coreos/go-systemd/v22 v22.1.0 github.com/cri-o/ocicni v0.2.0 github.com/cyphar/filepath-securejoin v0.2.2 @@ -62,7 +62,7 @@ require ( golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed k8s.io/api v0.0.0-20190620084959-7cf5895f2711 - k8s.io/apimachinery v0.19.0 + k8s.io/apimachinery v0.19.1 k8s.io/client-go v0.0.0-20190620085101-78d2af792bab ) @@ -85,8 +85,8 @@ github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU= github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw= github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg= -github.com/containers/storage v1.23.4 h1:1raHKGNs2C52tEq2ydHqZ+wu2u1d79BHMO6O5JO20xQ= -github.com/containers/storage v1.23.4/go.mod h1:KzpVgmUucelPYHq2YsseUTiTuucdVh3xfpPNmxmPZRU= +github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY= +github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38= @@ -258,6 +258,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk= github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= +github.com/klauspost/compress v1.11.0/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -643,8 +645,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= -k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= -k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.1 h1:cwsxZazM/LA9aUsBaL4bRS5ygoM6bYp8dFk22DSYQa4= +k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/libpod/container_internal.go b/libpod/container_internal.go index c41d81a2b..5a0a0edfa 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -380,6 +380,8 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) { } dest.GIDMap = append(dest.GIDMap, g) } + dest.HostUIDMapping = false + dest.HostGIDMapping = false } } @@ -957,8 +959,10 @@ func (c *Container) completeNetworkSetup() error { if err := c.syncContainer(); err != nil { return err } - if c.config.NetMode.IsSlirp4netns() { + if rootless.IsRootless() { return c.runtime.setupRootlessNetNS(c) + } else if c.config.NetMode.IsSlirp4netns() { + return c.runtime.setupSlirp4netns(c) } if err := c.runtime.setupNetNS(c); err != nil { return err diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index f789b0069..605b526a4 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -84,7 +84,11 @@ func (c *Container) prepare() error { // Set up network namespace if not already set up noNetNS := c.state.NetNS == nil if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS { - netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + if rootless.IsRootless() && len(c.config.Networks) > 0 { + netNS, networkStatus, createNetNSErr = AllocRootlessCNI(context.Background(), c) + } else { + netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + } if createNetNSErr != nil { return } @@ -98,8 +102,12 @@ func (c *Container) prepare() error { } // handle rootless network namespace setup - if noNetNS && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS { - createNetNSErr = c.runtime.setupRootlessNetNS(c) + if noNetNS && !c.config.PostConfigureNetNS { + if rootless.IsRootless() { + createNetNSErr = c.runtime.setupRootlessNetNS(c) + } else if c.config.NetMode.IsSlirp4netns() { + createNetNSErr = c.runtime.setupSlirp4netns(c) + } } }() // Mount storage if not mounted diff --git a/libpod/container_validate.go b/libpod/container_validate.go index d657e3549..b78168cd1 100644 --- a/libpod/container_validate.go +++ b/libpod/container_validate.go @@ -2,7 +2,6 @@ package libpod 
import ( "github.com/containers/podman/v2/libpod/define" - "github.com/containers/podman/v2/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -68,16 +67,6 @@ func (c *Container) validate() error { } } - // Rootless has some requirements, compared to networks. - if rootless.IsRootless() { - if len(c.config.Networks) > 0 { - return errors.Wrapf(define.ErrInvalidArg, "cannot join CNI networks if running rootless") - } - - // TODO: Should we make sure network mode is set to Slirp if set - // at all? - } - // Can only set static IP or MAC is creating a network namespace. if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) { return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace") diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index 6f266e5d6..c0508ce39 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -4,6 +4,7 @@ package libpod import ( "bytes" + "context" "crypto/rand" "fmt" "io" @@ -208,6 +209,20 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) { // Configure the network namespace for a rootless container func (r *Runtime) setupRootlessNetNS(ctr *Container) error { + if ctr.config.NetMode.IsSlirp4netns() { + return r.setupSlirp4netns(ctr) + } + if len(ctr.config.Networks) > 0 { + // set up port forwarder for CNI-in-slirp4netns + netnsPath := ctr.state.NetNS.Path() + // TODO: support slirp4netns port forwarder as well + return r.setupRootlessPortMappingViaRLK(ctr, netnsPath) + } + return nil +} + +// setupSlirp4netns can be called in rootful as well as in rootless +func (r *Runtime) setupSlirp4netns(ctr *Container) error { path := r.config.Engine.NetworkCmdPath if path == "" { @@ -711,7 +726,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) - // rootless containers do not use the CNI plugin + // rootless containers do not use the CNI plugin directly if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { var requestedIP net.IP if ctr.requestedIP != nil { @@ -738,6 +753,13 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { } } + // CNI-in-slirp4netns + if rootless.IsRootless() && len(ctr.config.Networks) != 0 { + if err := DeallocRootlessCNI(context.Background(), ctr); err != nil { + return errors.Wrapf(err, "error tearing down CNI-in-slirp4netns for container %s", ctr.ID()) + } + } + // First unmount the namespace if err := netns.UnmountNS(ctr.state.NetNS); err != nil { return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID()) diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go index dd72a3fd8..76bb01424 100644 --- a/libpod/networking_unsupported.go +++ b/libpod/networking_unsupported.go @@ -8,6 +8,10 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error { return define.ErrNotImplemented } +func (r *Runtime) setupSlirp4netns(ctr *Container) error { + return define.ErrNotImplemented +} + func (r *Runtime) setupNetNS(ctr *Container) error { return define.ErrNotImplemented } diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index f66835771..bb138ca14 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -1086,7 +1086,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, 
childStartPipe) cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...) - if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { + if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { ports, err := bindPorts(ctr.config.PortMappings) if err != nil { return err @@ -1098,7 +1098,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) } - if ctr.config.NetMode.IsSlirp4netns() { + if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() { if ctr.config.PostConfigureNetNS { havePortMapping := len(ctr.Config().PortMappings) > 0 if havePortMapping { diff --git a/libpod/options.go b/libpod/options.go index dccbb8741..7eec530ea 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -18,6 +18,7 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -897,6 +898,17 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { ctr.config.UserNsCtr = nsCtr.ID() ctr.config.IDMappings = nsCtr.config.IDMappings + g := generate.NewFromSpec(ctr.config.Spec) + + g.ClearLinuxUIDMappings() + for _, uidmap := range nsCtr.config.IDMappings.UIDMap { + g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) + } + g.ClearLinuxGIDMappings() + for _, gidmap := range nsCtr.config.IDMappings.GIDMap { + g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) + } + ctr.config.IDMappings = nsCtr.config.IDMappings return nil } } diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go new file mode 100644 index 000000000..76dbfdcae --- /dev/null +++ b/libpod/rootless_cni_linux.go @@ -0,0 +1,320 @@ +// +build linux + +package libpod + +import ( + "bytes" + "context" + "io" + "path/filepath" + "runtime" + + cnitypes "github.com/containernetworking/cni/pkg/types/current" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/podman/v2/libpod/define" + "github.com/containers/podman/v2/libpod/image" + "github.com/containers/podman/v2/pkg/util" + "github.com/containers/storage/pkg/lockfile" + "github.com/hashicorp/go-multierror" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var rootlessCNIInfraImage = map[string]string{ + // Built from ../contrib/rootless-cni-infra + // TODO: move to Podman's official quay + "amd64": "ghcr.io/akihirosuda/podman-rootless-cni-infra:gd34868a13-amd64", +} + +const ( + rootlessCNIInfraContainerNamespace = "podman-system" + rootlessCNIInfraContainerName = "rootless-cni-infra" +) + +// AllocRootlessCNI allocates a CNI netns inside the rootless CNI infra container. +// Locks "rootless-cni-infra.lck". +// +// When the infra container is not running, it is created. +// +// AllocRootlessCNI does not lock c. c should be already locked. 
+func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.Result, error) { + if len(c.config.Networks) == 0 { + return nil, nil, errors.New("allocRootlessCNI shall not be called when len(c.config.Networks) == 0") + } + l, err := getRootlessCNIInfraLock(c.runtime) + if err != nil { + return nil, nil, err + } + l.Lock() + defer l.Unlock() + infra, err := ensureRootlessCNIInfraContainerRunning(ctx, c.runtime) + if err != nil { + return nil, nil, err + } + k8sPodName := getPodOrContainerName(c) // passed to CNI as K8S_POD_NAME + cniResults := make([]*cnitypes.Result, len(c.config.Networks)) + for i, nw := range c.config.Networks { + cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName) + if err != nil { + return nil, nil, err + } + cniResults[i] = cniRes + } + nsObj, err := rootlessCNIInfraGetNS(infra, c.ID()) + if err != nil { + return nil, nil, err + } + logrus.Debugf("rootless CNI: container %q will join %q", c.ID(), nsObj.Path()) + return nsObj, cniResults, nil +} + +// DeallocRootlessCNI deallocates a CNI netns inside the rootless CNI infra container. +// Locks "rootless-cni-infra.lck". +// +// When the infra container is no longer needed, it is removed. +// +// DeallocRootlessCNI does not lock c. c should be already locked. +func DeallocRootlessCNI(ctx context.Context, c *Container) error { + if len(c.config.Networks) == 0 { + return errors.New("deallocRootlessCNI shall not be called when len(c.config.Networks) == 0") + } + l, err := getRootlessCNIInfraLock(c.runtime) + if err != nil { + return err + } + l.Lock() + defer l.Unlock() + infra, _ := getRootlessCNIInfraContainer(c.runtime) + if infra == nil { + return nil + } + var errs *multierror.Error + for _, nw := range c.config.Networks { + err := rootlessCNIInfraCallDelloc(infra, c.ID(), nw) + if err != nil { + errs = multierror.Append(errs, err) + } + } + if isIdle, err := rootlessCNIInfraIsIdle(infra); isIdle || err != nil { + if err != nil { + logrus.Warn(err) + } + logrus.Debugf("rootless CNI: removing infra container %q", infra.ID()) + if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil { + return err + } + logrus.Debugf("rootless CNI: removed infra container %q", infra.ID()) + } + return errs.ErrorOrNil() +} + +func getRootlessCNIInfraLock(r *Runtime) (lockfile.Locker, error) { + fname := filepath.Join(r.config.Engine.TmpDir, "rootless-cni-infra.lck") + return lockfile.GetLockfile(fname) +} + +func getPodOrContainerName(c *Container) string { + pod, err := c.runtime.GetPod(c.PodID()) + if err != nil || pod.config.Name == "" { + return c.Name() + } + return pod.config.Name +} + +func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) { + logrus.Debugf("rootless CNI: alloc %q, %q, %q", id, nw, k8sPodName) + var err error + + _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName) + if err != nil { + return nil, err + } + cniResStr, err := rootlessCNIInfraExec(infra, "print-cni-result", id, nw) + if err != nil { + return nil, err + } + var cniRes cnitypes.Result + if err := json.Unmarshal([]byte(cniResStr), &cniRes); err != nil { + return nil, errors.Wrapf(err, "unmarshaling as cnitypes.Result: %q", cniResStr) + } + return &cniRes, nil +} + +func rootlessCNIInfraCallDelloc(infra *Container, id, nw string) error { + logrus.Debugf("rootless CNI: dealloc %q, %q", id, nw) + _, err := rootlessCNIInfraExec(infra, "dealloc", id, nw) + return err +} + +func rootlessCNIInfraIsIdle(infra *Container) (bool, error) { + 
type isIdle struct { + Idle bool `json:"idle"` + } + resStr, err := rootlessCNIInfraExec(infra, "is-idle") + if err != nil { + return false, err + } + var res isIdle + if err := json.Unmarshal([]byte(resStr), &res); err != nil { + return false, errors.Wrapf(err, "unmarshaling as isIdle: %q", resStr) + } + return res.Idle, nil +} + +func rootlessCNIInfraGetNS(infra *Container, id string) (ns.NetNS, error) { + type printNetnsPath struct { + Path string `json:"path"` + } + resStr, err := rootlessCNIInfraExec(infra, "print-netns-path", id) + if err != nil { + return nil, err + } + var res printNetnsPath + if err := json.Unmarshal([]byte(resStr), &res); err != nil { + return nil, errors.Wrapf(err, "unmarshaling as printNetnsPath: %q", resStr) + } + nsObj, err := ns.GetNS(res.Path) + if err != nil { + return nil, err + } + return nsObj, nil +} + +func getRootlessCNIInfraContainer(r *Runtime) (*Container, error) { + containers, err := r.GetContainersWithoutLock(func(c *Container) bool { + return c.Namespace() == rootlessCNIInfraContainerNamespace && + c.Name() == rootlessCNIInfraContainerName + }) + if err != nil { + return nil, err + } + if len(containers) == 0 { + return nil, nil + } + return containers[0], nil +} + +func ensureRootlessCNIInfraContainerRunning(ctx context.Context, r *Runtime) (*Container, error) { + c, err := getRootlessCNIInfraContainer(r) + if err != nil { + return nil, err + } + if c == nil { + return startRootlessCNIInfraContainer(ctx, r) + } + st, err := c.ContainerState() + if err != nil { + return nil, err + } + if st.State == define.ContainerStateRunning { + logrus.Debugf("rootless CNI: infra container %q is already running", c.ID()) + return c, nil + } + logrus.Debugf("rootless CNI: infra container %q is %q, being started", c.ID(), st.State) + if err := c.initAndStart(ctx); err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: infra container %q is running", c.ID()) + return c, nil +} + +func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container, error) { + imageName, ok := rootlessCNIInfraImage[runtime.GOARCH] + if !ok { + return nil, errors.Errorf("cannot find rootless-podman-network-sandbox image for %s", runtime.GOARCH) + } + logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName) + newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil, + image.SigningOptions{}, nil, util.PullImageMissing) + if err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: image %q is ready", imageName) + + g, err := generate.New("linux") + if err != nil { + return nil, err + } + g.SetupPrivileged(true) + // Set --pid=host for ease of propagating "/proc/PID/ns/net" string + if err := g.RemoveLinuxNamespace(string(spec.PIDNamespace)); err != nil { + return nil, err + } + g.RemoveMount("/proc") + procMount := spec.Mount{ + Destination: "/proc", + Type: "bind", + Source: "/proc", + Options: []string{"rbind", "nosuid", "noexec", "nodev"}, + } + g.AddMount(procMount) + // Mount CNI networks + etcCNINetD := spec.Mount{ + Destination: "/etc/cni/net.d", + Type: "bind", + Source: r.config.Network.NetworkConfigDir, + Options: []string{"ro"}, + } + g.AddMount(etcCNINetD) + // FIXME: how to propagate ProcessArgs and Envs from Dockerfile? 
+ g.SetProcessArgs([]string{"sleep", "infinity"}) + g.AddProcessEnv("CNI_PATH", "/opt/cni/bin") + var options []CtrCreateOption + options = append(options, WithRootFSFromImage(newImage.ID(), imageName, imageName)) + options = append(options, WithCtrNamespace(rootlessCNIInfraContainerNamespace)) + options = append(options, WithName(rootlessCNIInfraContainerName)) + options = append(options, WithPrivileged(true)) + options = append(options, WithSecLabels([]string{"disable"})) + options = append(options, WithRestartPolicy("always")) + options = append(options, WithNetNS(nil, false, "slirp4netns", nil)) + c, err := r.NewContainer(ctx, g.Config, options...) + if err != nil { + return nil, err + } + logrus.Debugf("rootless CNI infra container %q is created, now being started", c.ID()) + if err := c.initAndStart(ctx); err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: infra container %q is running", c.ID()) + + return c, nil +} + +func rootlessCNIInfraExec(c *Container, args ...string) (string, error) { + cmd := "rootless-cni-infra" + var ( + outB bytes.Buffer + errB bytes.Buffer + streams define.AttachStreams + config ExecConfig + ) + streams.OutputStream = &nopWriteCloser{Writer: &outB} + streams.ErrorStream = &nopWriteCloser{Writer: &errB} + streams.AttachOutput = true + streams.AttachError = true + config.Command = append([]string{cmd}, args...) + config.Privileged = true + logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, begin", + c.ID(), config, streams) + code, err := c.Exec(&config, &streams, nil) + logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, end (code=%d, err=%v)", + c.ID(), config, streams, code, err) + if err != nil { + return "", err + } + if code != 0 { + return "", errors.Errorf("command %s %v in container %s failed with status %d, stdout=%q, stderr=%q", + cmd, args, c.ID(), code, outB.String(), errB.String()) + } + return outB.String(), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (nwc *nopWriteCloser) Close() error { + return nil +} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 936dce2e9..241448981 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -772,7 +772,11 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) { func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) { r.lock.RLock() defer r.lock.RUnlock() + return r.GetContainersWithoutLock(filters...) 
+} +// GetContainersWithoutLock is same as GetContainers but without lock +func (r *Runtime) GetContainersWithoutLock(filters ...ContainerFilter) ([]*Container, error) { if !r.valid { return nil, define.ErrRuntimeStopped } diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go index 80b7505df..87b947549 100644 --- a/pkg/api/handlers/compat/networks.go +++ b/pkg/api/handlers/compat/networks.go @@ -5,6 +5,7 @@ import ( "net" "net/http" "os" + "strings" "syscall" "time" @@ -177,9 +178,11 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } + + filterNames, nameFilterExists := query.Filters["name"] // TODO remove when filters are implemented - if len(query.Filters) > 0 { - utils.InternalServerError(w, errors.New("filters for listing networks is not implemented")) + if (!nameFilterExists && len(query.Filters) > 0) || len(query.Filters) > 1 { + utils.InternalServerError(w, errors.New("only the name filter for listing networks is implemented")) return } netNames, err := network.GetNetworkNamesFromFileSystem(config) @@ -187,6 +190,21 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) { utils.InternalServerError(w, err) return } + + // filter by name + if nameFilterExists { + names := []string{} + for _, name := range netNames { + for _, filter := range filterNames { + if strings.Contains(name, filter) { + names = append(names, name) + break + } + } + } + netNames = names + } + reports := make([]*types.NetworkResource, 0, len(netNames)) for _, name := range netNames { report, err := getNetworkResourceByName(name, runtime) diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go index 475522664..dfece2a4e 100644 --- a/pkg/api/handlers/libpod/networks.go +++ b/pkg/api/handlers/libpod/networks.go @@ -42,7 +42,21 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) { } func ListNetworks(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) - options := entities.NetworkListOptions{} + decoder := r.Context().Value("decoder").(*schema.Decoder) + query := struct { + Filter string `schema:"filter"` + }{ + // override any golang type defaults + } + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String())) + return + } + + options := entities.NetworkListOptions{ + Filter: query.Filter, + } ic := abi.ContainerEngine{Libpod: runtime} reports, err := ic.NetworkList(r.Context(), options) if err != nil { diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go index 7918ad4a2..61916eedf 100644 --- a/pkg/api/server/register_networks.go +++ b/pkg/api/server/register_networks.go @@ -61,6 +61,11 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error { // - networks (compat) // summary: List networks // description: Display summary of network configurations + // parameters: + // - in: query + // name: filters + // type: string + // description: JSON encoded value of the filters (a map[string][]string) to process on the networks list. Only the name filter is supported. 
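As an aside, not part of the change: the compat `filters` parameter documented above is a JSON-encoded `map[string][]string` that is then URL-escaped, which is exactly what produces strings such as `filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D` in the API tests further down. A minimal sketch of building such a request path, using only the standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Filter on network names, as the compat "networks" route above supports.
	filters := map[string][]string{"name": {"network1", "network2"}}
	b, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Set("filters", string(b))
	// networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D
	fmt.Println("networks?" + q.Encode())
}
```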
// produces: // - application/json // responses: @@ -106,7 +111,7 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error { // required: true // description: the name of the network // - in: query - // name: Force + // name: force // type: boolean // description: remove containers associated with network // produces: @@ -152,6 +157,11 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error { // - networks // summary: List networks // description: Display summary of network configurations + // parameters: + // - in: query + // name: filter + // type: string + // description: Provide filter values (e.g. 'name=podman') // produces: // - application/json // responses: diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go index fd1111282..d8dc7e352 100644 --- a/pkg/bindings/network/network.go +++ b/pkg/bindings/network/network.go @@ -70,7 +70,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) ([]*entities.Netw } // List returns a summary of all CNI network configurations -func List(ctx context.Context) ([]*entities.NetworkListReport, error) { +func List(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) { var ( netList []*entities.NetworkListReport ) @@ -78,7 +78,11 @@ func List(ctx context.Context) ([]*entities.NetworkListReport, error) { if err != nil { return nil, err } - response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", nil, nil) + params := url.Values{} + if options.Filter != "" { + params.Set("filter", options.Filter) + } + response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", params, nil) if err != nil { return netList, err } diff --git a/pkg/domain/entities/manifest.go b/pkg/domain/entities/manifest.go index 853619b19..01180951a 100644 --- a/pkg/domain/entities/manifest.go +++ b/pkg/domain/entities/manifest.go @@ -9,14 +9,19 @@ type ManifestCreateOptions struct { } type ManifestAddOptions struct { - All bool `json:"all" schema:"all"` - Annotation []string `json:"annotation" schema:"annotation"` - Arch string `json:"arch" schema:"arch"` - Features []string `json:"features" schema:"features"` - Images []string `json:"images" schema:"images"` - OS string `json:"os" schema:"os"` - OSVersion string `json:"os_version" schema:"os_version"` - Variant string `json:"variant" schema:"variant"` + All bool `json:"all" schema:"all"` + Annotation []string `json:"annotation" schema:"annotation"` + Arch string `json:"arch" schema:"arch"` + Authfile string `json:"-" schema:"-"` + CertDir string `json:"-" schema:"-"` + Features []string `json:"features" schema:"features"` + Images []string `json:"images" schema:"images"` + OS string `json:"os" schema:"os"` + OSVersion string `json:"os_version" schema:"os_version"` + Password string `json:"-" schema:"-"` + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + Username string `json:"-" schema:"-"` + Variant string `json:"variant" schema:"variant"` } type ManifestAnnotateOptions struct { diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go index 6f3c6b902..55f73bf65 100644 --- a/pkg/domain/infra/abi/manifest.go +++ b/pkg/domain/infra/abi/manifest.go @@ -102,7 +102,24 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd } manifestAddOpts.Annotation = annotations } - listID, err := listImage.AddManifest(*ir.Libpod.SystemContext(), manifestAddOpts) + + // Set the system context. 
+ sys := ir.Libpod.SystemContext() + if sys != nil { + sys = &types.SystemContext{} + } + sys.AuthFilePath = opts.Authfile + sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify + sys.DockerCertPath = opts.CertDir + + if opts.Username != "" && opts.Password != "" { + sys.DockerAuthConfig = &types.DockerAuthConfig{ + Username: opts.Username, + Password: opts.Password, + } + } + + listID, err := listImage.AddManifest(*sys, manifestAddOpts) if err != nil { return listID, err } @@ -191,6 +208,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts en } sys.AuthFilePath = opts.Authfile sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify + sys.DockerCertPath = opts.CertDir if opts.Username != "" && opts.Password != "" { sys.DockerAuthConfig = &types.DockerAuthConfig{ diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go index c06714cbb..807e4b272 100644 --- a/pkg/domain/infra/abi/network.go +++ b/pkg/domain/infra/abi/network.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "os" "path/filepath" "strings" @@ -216,6 +217,9 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate if err != nil { return "", err } + if err := os.MkdirAll(network.GetCNIConfDir(runtimeConfig), 0755); err != nil { + return "", err + } cniPathName := filepath.Join(network.GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name)) err = ioutil.WriteFile(cniPathName, b, 0644) return cniPathName, err diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index cc919561f..062b38a70 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -8,11 +8,13 @@ import ( "os" "strconv" "strings" + "sync" "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/podman/v2/libpod/define" + "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/pkg/api/handlers" "github.com/containers/podman/v2/pkg/bindings" "github.com/containers/podman/v2/pkg/bindings/containers" @@ -507,33 +509,90 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta for _, w := range con.Warnings { fmt.Fprintf(os.Stderr, "%s\n", w) } + report := entities.ContainerRunReport{Id: con.ID} - // Attach - if !opts.Detach { - err = startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream) - if err == nil { - exitCode, err := containers.Wait(ic.ClientCxt, con.ID, nil) - if err == nil { - report.ExitCode = int(exitCode) - } + + if opts.Detach { + // Detach and return early + err := containers.Start(ic.ClientCxt, con.ID, nil) + if err != nil { + report.ExitCode = define.ExitCode(err) } - } else { - err = containers.Start(ic.ClientCxt, con.ID, nil) + return &report, err } - if err != nil { + + // Attach + if err := startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream); err != nil { report.ExitCode = define.ExitCode(err) + if opts.Rm { + if rmErr := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); rmErr != nil { + logrus.Debugf("unable to remove container %s after failing to start and attach to it", con.ID) + } + } + return &report, err } + if opts.Rm { - if err := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); err != nil { - if errors.Cause(err) == define.ErrNoSuchCtr || - errors.Cause(err) == define.ErrCtrRemoved { - logrus.Warnf("Container %s does 
not exist: %v", con.ID, err) - } else { - logrus.Errorf("Error removing container %s: %v", con.ID, err) + // Defer the removal, so we can return early if needed and + // de-spaghetti the code. + defer func() { + if err := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr || + errors.Cause(err) == define.ErrCtrRemoved { + logrus.Warnf("Container %s does not exist: %v", con.ID, err) + } else { + logrus.Errorf("Error removing container %s: %v", con.ID, err) + } } + }() + } + + // Wait + exitCode, waitErr := containers.Wait(ic.ClientCxt, con.ID, nil) + if waitErr == nil { + report.ExitCode = int(exitCode) + return &report, nil + } + + // Determine why the wait failed. If the container doesn't exist, + // consult the events. + if !strings.Contains(waitErr.Error(), define.ErrNoSuchCtr.Error()) { + return &report, waitErr + } + + // Events + eventsChannel := make(chan *events.Event) + eventOptions := entities.EventsOptions{ + EventChan: eventsChannel, + Filter: []string{ + "type=container", + fmt.Sprintf("container=%s", con.ID), + fmt.Sprintf("event=%s", events.Exited), + }, + } + + var lastEvent *events.Event + var mutex sync.Mutex + mutex.Lock() + // Read the events. + go func() { + for e := range eventsChannel { + lastEvent = e } + mutex.Unlock() + }() + + eventsErr := ic.Events(ctx, eventOptions) + + // Wait for all events to be read + mutex.Lock() + if eventsErr != nil || lastEvent == nil { + logrus.Errorf("Cannot get exit code: %v", err) + report.ExitCode = define.ExecErrorCodeNotFound + return &report, nil // compat with local client } + report.ExitCode = lastEvent.ContainerExitCode return &report, err } diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go index 2b197cac0..074425087 100644 --- a/pkg/domain/infra/tunnel/network.go +++ b/pkg/domain/infra/tunnel/network.go @@ -8,7 +8,7 @@ import ( ) func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) { - return network.List(ic.ClientCxt) + return network.List(ic.ClientCxt, options) } func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) { diff --git a/pkg/network/files.go b/pkg/network/files.go index 38ce38b97..a2090491f 100644 --- a/pkg/network/files.go +++ b/pkg/network/files.go @@ -14,11 +14,16 @@ import ( "github.com/pkg/errors" ) -func GetCNIConfDir(config *config.Config) string { - if len(config.Network.NetworkConfigDir) < 1 { - return CNIConfigDir +func GetCNIConfDir(configArg *config.Config) string { + if len(configArg.Network.NetworkConfigDir) < 1 { + dc, err := config.DefaultConfig() + if err != nil { + // Fallback to hard-coded dir + return CNIConfigDir + } + return dc.Network.NetworkConfigDir } - return config.Network.NetworkConfigDir + return configArg.Network.NetworkConfigDir } // LoadCNIConfsFromDir loads all the CNI configurations from a dir diff --git a/rootless.md b/rootless.md index 196ed52c3..22b03e340 100644 --- a/rootless.md +++ b/rootless.md @@ -28,9 +28,6 @@ can easily fail * Can not use overlayfs driver, but does support fuse-overlayfs * Ubuntu supports non root overlay, but no other Linux distros do. * Only other supported driver is VFS. -* No CNI Support - * CNI wants to modify IPTables, plus other network manipulation that requires CAP_SYS_ADMIN. 
- * There is potential we could probably do some sort of denylisting of the relevant plugins, and add a new plugin for rootless networking - slirp4netns as one example and there may be others * Cannot use ping out of the box. * [(Can be fixed by setting sysctl on host)](https://github.com/containers/podman/blob/master/troubleshooting.md#6-rootless-containers-cannot-ping-hosts) * Requires new shadow-utils (not found in older (RHEL7/Centos7 distros) Should be fixed in RHEL7.7 release) diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at index 4c032c072..143d6c07b 100644 --- a/test/apiv2/35-networks.at +++ b/test/apiv2/35-networks.at @@ -21,6 +21,27 @@ if root; then t POST libpod/networks/create '"Subnet":{"IP":"10.10.1.0","Mask":[0,255,255,0]}' 500 \ .cause~'.*mask is invalid' + # network list + t GET libpod/networks/json 200 + t GET libpod/networks/json?filter=name=network1 200 \ + length=1 \ + .[0].Name=network1 + t GET networks 200 + + #network list docker endpoint + #filters={"name":["network1","network2"]} + t GET networks?filters=%7B%22name%22%3A%5B%22network1%22%2C%22network2%22%5D%7D 200 \ + length=2 + #filters={"name":["network"]} + t GET networks?filters=%7B%22name%22%3A%5B%22network%22%5D%7D 200 \ + length=2 + # invalid filter filters={"label":"abc"} + t GET networks?filters=%7B%22label%22%3A%5B%22abc%22%5D%7D 500 \ + .cause="only the name filter for listing networks is implemented" + # invalid filter filters={"label":"abc","name":["network"]} + t GET networks?filters=%7B%22label%22%3A%22abc%22%2C%22name%22%3A%5B%22network%22%5D%7D 500 \ + .cause="only the name filter for listing networks is implemented" + # clean the network t DELETE libpod/networks/network1 200 \ .[0].Name~network1 \ diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index ed55484e3..b6bbae15b 100644 --- a/test/e2e/common_test.go +++ b/test/e2e/common_test.go @@ -245,6 +245,12 @@ func PodmanTestCreateUtil(tempDir string, remote bool) *PodmanTestIntegration { } os.Setenv("DISABLE_HC_SYSTEMD", "true") CNIConfigDir := "/etc/cni/net.d" + if rootless.IsRootless() { + CNIConfigDir = filepath.Join(os.Getenv("HOME"), ".config/cni/net.d") + } + if err := os.MkdirAll(CNIConfigDir, 0755); err != nil { + panic(err) + } storageFs := STORAGE_FS if rootless.IsRootless() { diff --git a/test/e2e/network_create_test.go b/test/e2e/network_create_test.go index f97e6c1f1..13d515d8e 100644 --- a/test/e2e/network_create_test.go +++ b/test/e2e/network_create_test.go @@ -74,7 +74,6 @@ var _ = Describe("Podman network create", func() { ) BeforeEach(func() { - SkipIfRootless() tempdir, err = CreateTempDirInTempDir() if err != nil { os.Exit(1) @@ -180,6 +179,7 @@ var _ = Describe("Podman network create", func() { It("podman network create with name and IPv6 subnet", func() { SkipIfRemote() + SkipIfRootless() var ( results []network.NcList ) diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go index f427afa67..c35b82fc1 100644 --- a/test/e2e/network_test.go +++ b/test/e2e/network_test.go @@ -1,5 +1,3 @@ -// +build !remote - package integration import ( @@ -9,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/containers/podman/v2/pkg/rootless" . "github.com/containers/podman/v2/test/utils" "github.com/containers/storage/pkg/stringid" . 
"github.com/onsi/ginkgo" @@ -34,7 +33,6 @@ var _ = Describe("Podman network", func() { ) BeforeEach(func() { - SkipIfRootless() tempdir, err = CreateTempDirInTempDir() if err != nil { os.Exit(1) @@ -76,13 +74,12 @@ var _ = Describe("Podman network", func() { } ] }` - cniPath = "/etc/cni/net.d" ) It("podman network list", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) @@ -95,7 +92,7 @@ var _ = Describe("Podman network", func() { It("podman network list -q", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) @@ -108,7 +105,7 @@ var _ = Describe("Podman network", func() { It("podman network list --filter success", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) @@ -121,7 +118,7 @@ var _ = Describe("Podman network", func() { It("podman network list --filter failure", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) @@ -140,7 +137,7 @@ var _ = Describe("Podman network", func() { It("podman network rm", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) @@ -168,11 +165,16 @@ var _ = Describe("Podman network", func() { It("podman network inspect", func() { // Setup, use uuid to prevent conflict with other tests uuid := stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) - session := podmanTest.Podman([]string{"network", "inspect", "podman-integrationtest", "podman"}) + expectedNetworks := []string{"podman-integrationtest"} + if !rootless.IsRootless() { + // rootful image contains "podman/cni/87-podman-bridge.conflist" for "podman" network + expectedNetworks = append(expectedNetworks, "podman") + } + session := podmanTest.Podman(append([]string{"network", "inspect"}, expectedNetworks...)) session.WaitWithDefaultTimeout() Expect(session.ExitCode()).To(Equal(0)) Expect(session.IsJSONOutputValid()).To(BeTrue()) @@ -181,7 +183,7 @@ var _ = Describe("Podman network", func() { It("podman network inspect", func() { // Setup, use uuid to prevent conflict with other tests uuid := 
stringid.GenerateNonCryptoID() - secondPath := filepath.Join(cniPath, fmt.Sprintf("%s.conflist", uuid)) + secondPath := filepath.Join(podmanTest.CNIConfigDir, fmt.Sprintf("%s.conflist", uuid)) writeConf([]byte(secondConf), secondPath) defer removeConf(secondPath) diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go index 25f8d0d15..8d860cfc3 100644 --- a/test/e2e/run_userns_test.go +++ b/test/e2e/run_userns_test.go @@ -277,6 +277,13 @@ var _ = Describe("Podman UserNS support", func() { ok, _ := session.GrepString("4998") Expect(ok).To(BeTrue()) + + session = podmanTest.Podman([]string{"run", "--rm", "--userns=container:" + ctrName, "--net=container:" + ctrName, "alpine", "cat", "/proc/self/uid_map"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + ok, _ = session.GrepString("4998") + Expect(ok).To(BeTrue()) }) It("podman --user with volume", func() { diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats index 478ba0f20..c8475c3e9 100644 --- a/test/system/055-rm.bats +++ b/test/system/055-rm.bats @@ -44,8 +44,6 @@ load helpers # # See https://github.com/containers/podman/issues/3795 @test "podman rm -f" { - skip_if_remote "FIXME: pending #7117" - rand=$(random_string 30) ( sleep 3; run_podman rm -f $rand ) & run_podman 137 run --name $rand $IMAGE sleep 30 diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 27ddcc14d..ca8ec414e 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.23.4 +1.23.5 diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 7a568273f..39db66641 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/Microsoft/hcsshim v0.8.9 github.com/docker/go-units v0.4.0 github.com/hashicorp/go-multierror v1.1.0 - github.com/klauspost/compress v1.10.11 + github.com/klauspost/compress v1.11.0 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-shellwords v1.0.10 github.com/mistifyio/go-zfs v2.1.1+incompatible diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index dba69025f..d1fb711b1 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -62,8 +62,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk= -github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 
d65d52718..b22263fe4 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -198,7 +198,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) { } // getRootlessStorageOpts returns the storage opts for containers running as non root -func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) { +func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) @@ -207,10 +207,20 @@ func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) { } opts.RunRoot = rootlessRuntime opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") - opts.RootlessStoragePath = opts.GraphRoot + if systemOpts.RootlessStoragePath != "" { + opts.RootlessStoragePath = systemOpts.RootlessStoragePath + } else { + opts.RootlessStoragePath = opts.GraphRoot + } if path, err := exec.LookPath("fuse-overlayfs"); err == nil { opts.GraphDriverName = "overlay" opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break + } + } } else { opts.GraphDriverName = "vfs" } @@ -242,7 +252,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str ) storageOpts := defaultStoreOptions if rootless && rootlessUID != 0 { - storageOpts, err = getRootlessStorageOpts(rootlessUID) + storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) if err != nil { return storageOpts, err } diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 6d4c1e98b..4a73e1bdd 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -127,7 +127,7 @@ func (e *fastGen) addBlock(src []byte) int32 { // hash4 returns the hash of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <32. func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) + return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) } type tableEntryPrev struct { @@ -138,25 +138,25 @@ type tableEntryPrev struct { // hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <32. func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) + return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32) } // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) } // hash8 returns the hash of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) + return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64)) } // hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. // Preferably h should be a constant and should always be <64. 
func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64)) } // matchlen will return the match length between offsets and t in src. diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go index c74a95fe7..b26d19ec2 100644 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go @@ -85,7 +85,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -104,7 +104,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -167,15 +167,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -183,17 +183,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -202,9 +204,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -212,10 +214,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -225,7 +227,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -233,7 +235,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 53fe1d06e..208d66711 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -206,7 +206,7 @@ func (w *huffmanBitWriter) write(b []byte) { } func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << (w.nbits & 63) + w.bits |= uint64(b) << (w.nbits & reg16SizeMask64) w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := lengths[lengthCode&31] - w.bits |= uint64(c.code) << (w.nbits & 63) + w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := offs[offsetCode&31] - w.bits |= uint64(c.code) << (w.nbits & 63) + w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -878,7 +878,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << ((w.nbits) & 63) + w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64) w.nbits += c.len if w.nbits >= 48 { bits := w.bits diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 3e4259f15..189e9fe0b 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -522,8 +522,8 @@ func (f *decompressor) readHuffman() error { return err } } - rep += int(f.b & uint32(1<<nb-1)) - f.b >>= nb + rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1)) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb if i+rep > n { if debugDecode { @@ -603,7 +603,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -622,7 +622,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -685,12 +685,12 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { if err = f.moreBits(); err != nil { @@ -701,17 +701,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -720,7 +722,7 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { if err = f.moreBits(); err != nil { if debugDecode { @@ -730,10 +732,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -743,7 +745,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -751,7 +753,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -869,7 +871,7 @@ func (f *decompressor) moreBits() error { return noEOF(err) } f.roffset++ - f.b |= uint32(c) << f.nb + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) f.nb += 8 return nil } @@ -894,7 +896,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { return 0, noEOF(err) } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := h.chunks[b&(huffmanNumChunks-1)] @@ -913,7 +915,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { f.err = CorruptInputError(f.roffset) return 0, f.err } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n return int(chunk >> huffmanValueShift), nil } diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index 397dc1b1a..9a92a1b30 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -63,7 +63,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -82,7 +82,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -145,15 +145,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -161,17 +161,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -180,9 +182,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -190,10 +192,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -203,7 +205,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -211,7 +213,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -287,7 +289,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -306,7 +308,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -369,15 +371,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -385,17 +387,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -404,9 +408,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -414,10 +418,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -427,7 +431,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -435,7 +439,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -511,7 +515,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -530,7 +534,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -593,15 +597,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -609,17 +613,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -628,9 +634,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -638,10 +644,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -651,7 +657,7 @@ readLiteral: } // No check on length; encoding can be prescient.
- if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -659,7 +665,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } @@ -735,7 +741,7 @@ readLiteral: return } f.roffset++ - b |= uint32(c) << (nb & 31) + b |= uint32(c) << (nb & regSizeMaskUint32) nb += 8 } chunk := f.hl.chunks[b&(huffmanNumChunks-1)] @@ -754,7 +760,7 @@ readLiteral: f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & 31) + f.b = b >> (n & regSizeMaskUint32) f.nb = nb - n v = int(chunk >> huffmanValueShift) break @@ -817,15 +823,15 @@ readLiteral: return } } - length += int(f.b & uint32(1<<n-1)) - f.b >>= n + length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) + f.b >>= n & regSizeMaskUint32 f.nb -= n } - var dist int + var dist uint32 if f.hd == nil { for f.nb < 5 { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -833,17 +839,19 @@ readLiteral: return } } - dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) + dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) f.b >>= 5 f.nb -= 5 } else { - if dist, err = f.huffSym(f.hd); err != nil { + sym, err := f.huffSym(f.hd) + if err != nil { if debugDecode { fmt.Println("huffsym:", err) } f.err = err return } + dist = uint32(sym) } switch { @@ -852,9 +860,9 @@ readLiteral: case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << nb + extra := (dist & 1) << (nb & regSizeMaskUint32) for f.nb < nb { - if err = moreBits(); err != nil { + if err = f.moreBits(); err != nil { if debugDecode { fmt.Println("morebits f.nb<nb:", err) } @@ -862,10 +870,10 @@ readLiteral: return } } - extra |= int(f.b & uint32(1<<nb-1)) - f.b >>= nb + extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) + f.b >>= nb & regSizeMaskUint32 f.nb -= nb - dist = 1<<(nb+1) + 1 + extra + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra default: if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) @@ -875,7 +883,7 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > f.dict.histSize() { + if dist > uint32(f.dict.histSize()) { if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -883,7 +891,7 @@ readLiteral: return } - f.copyLen, f.copyDist = length, dist + f.copyLen, f.copyDist = length, int(dist) goto copyHistory } } diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 000000000..6ed28061b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size.
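Editorial illustration, not part of the vendored file: the constants below exist so hot shift expressions can be written as `x >> (s & regMask)`. On amd64 the mask equals the hardware's implicit "shift modulo register width" behavior, so the compiler can drop its extra shift-amount handling; on every other GOARCH (see regmask_other.go below) the masks are all ones and the AND is a no-op. A stand-alone sketch of the trick, with `hashDemo` and `somePrime` invented here as a stand-in for the library's own hash functions and prime constants:

```go
package main

import "fmt"

// amd64 value; regmask_other.go would use 0xff / ^uint(0), making the AND a no-op.
const reg8SizeMask64 = 63

// hashDemo mirrors the shape of the flate hash functions above: h is always
// smaller than 64, so masking the shift amount does not change the result.
func hashDemo(u uint64, h uint8) uint32 {
	const somePrime = 0x9E3779B97F4A7C15 // stand-in multiplier, not the library's constant
	return uint32((u * somePrime) >> ((64 - h) & reg8SizeMask64))
}

func main() {
	fmt.Printf("%08x\n", hashDemo(0xdeadbeefcafebabe, 17))
}
```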
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 000000000..f477a5d6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,39 @@ +//+build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 5dd66854b..7ec2022b6 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -119,6 +119,16 @@ type Scratch struct { huffWeight [maxSymbolValue + 1]byte } +// TransferCTable will transfer the previously used compression table. 
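An aside before the function that follows, not part of the vendored change: `TransferCTable` lets a caller seed one `Scratch` with the table of another (for example a dictionary's) and then compress with reuse allowed. The sketch below only leans on signatures that appear elsewhere in this diff (`Compress1X(in, s)` returning `(out, reUsed, err)`, `ReusePolicyAllow`, and the `ErrIncompressible` / `ErrUseRLE` sentinels handled by the zstd block encoder); `compressWithSharedTable` is an invented helper name.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// compressWithSharedTable compresses lits, optionally reusing the table
// previously built in dictScratch, and maps huff0's sentinel errors to the
// raw / RLE fallbacks a block encoder would take.
func compressWithSharedTable(lits []byte, dictScratch *huff0.Scratch) ([]byte, error) {
	var s huff0.Scratch
	if dictScratch != nil {
		s.TransferCTable(dictScratch) // seed with the shared table
		s.Reuse = huff0.ReusePolicyAllow
	}
	out, reUsed, err := huff0.Compress1X(lits, &s)
	switch err {
	case nil:
		_ = reUsed // true when the transferred table was kept
		return out, nil
	case huff0.ErrIncompressible:
		// Caller should store the literals raw.
		return append([]byte(nil), lits...), nil
	case huff0.ErrUseRLE:
		// All bytes equal; caller should store one byte plus a count.
		return lits[:1], nil
	default:
		return nil, err
	}
}

func main() {
	out, err := compressWithSharedTable([]byte("aaaaaabbbbbbcccccc"), nil)
	fmt.Println(len(out), err)
}
```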
+func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + func (s *Scratch) prepare(in []byte) (*Scratch, error) { if len(in) > BlockSizeMax { return nil, ErrTooBig diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index ac3640dc9..ea3e51082 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -5,7 +5,6 @@ It offers a very wide range of compression / speed trade-off, while being backed A high performance compression algorithm is implemented. For now focused on speed. This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -Note that custom dictionaries are only supported for decompression. This package is pure Go and without use of "unsafe". @@ -232,41 +231,6 @@ nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 ``` -### Converters - -As part of the development process a *Snappy* -> *Zstandard* converter was also built. - -This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. -Note that a single block is not framed. - -Conversion is done by converting the stream directly from Snappy without intermediate full decoding. -Therefore the compression ratio is much less than what can be done by a full decompression -and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -any errors being generated. -No CRC value is being generated and not all CRC values of the Snappy stream are checked. -However, it provides really fast re-compression of Snappy streams. - - -``` -BenchmarkSnappy_ConvertSilesia-8 1 1156001600 ns/op 183.35 MB/s -Snappy len 103008711 -> zstd len 82687318 - -BenchmarkSnappy_Enwik9-8 1 6472998400 ns/op 154.49 MB/s -Snappy len 508028601 -> zstd len 390921079 -``` - - -```Go - s := zstd.SnappyConverter{} - n, err = s.Convert(input, output) - if err != nil { - fmt.Println("Re-compressed stream to", n, "bytes") - } -``` - -The converter `s` can be reused to avoid allocations, even after errors. - - ## Decompressor Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. @@ -337,6 +301,21 @@ A re-used Decoder will still contain the dictionaries registered. When registering multiple dictionaries with the same ID, the last one will be used. +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. + +For any real gains, the dictionary should be built with similar data. +If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. 
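Not part of the README diff: a hedged round-trip sketch of the dictionary support described above. It assumes a file `samples.dict` built beforehand with the zstd command line tool (for instance `zstd --train samples/* -o samples.dict`), and uses the `WithEncoderDict` option named in the text together with the decoder-side dictionary registration mentioned earlier in this README.

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Assumed to exist; build it from representative samples beforehand.
	dict, err := os.ReadFile("samples.dict")
	if err != nil {
		panic(err)
	}
	payload := []byte("example payload that resembles the training samples")

	// Compress with the dictionary.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(payload, nil)
	_ = enc.Close()

	// Decompress; the same dictionary must be registered on the decoder.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(out), err)
}
```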
Just be aware to test performance when implementing. + ### Allocation-less operation The decoder has been designed to operate without allocations after a warmup. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index c8ec6e331..4733ea876 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -646,7 +646,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { } } else { if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litDec != hist.huffTree { + if hist.dict == nil || hist.dict.litEnc != hist.huffTree { huffDecoderPool.Put(hist.huffTree) } hist.huffTree = nil diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index be718afd4..083fbb502 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -14,12 +14,13 @@ import ( ) type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - wr bitWriter + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter extraLits int last bool @@ -314,19 +315,19 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { } // encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(raw bool) error { +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { var bh blockHeader bh.setLast(b.last) - bh.setSize(uint32(len(b.literals))) + bh.setSize(uint32(len(lits))) // Don't compress extremely small blocks - if len(b.literals) < 32 || raw { + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { if debug { - println("Adding RAW block, length", len(b.literals), "last:", b.last) + println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output) - b.output = append(b.output, b.literals...) + b.output = append(b.output, lits...) return nil } @@ -335,13 +336,18 @@ func (b *blockEnc) encodeLits(raw bool) error { reUsed, single bool err error ) - if len(b.literals) >= 1024 { + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 { + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { // Use 1 stream single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) } else { err = huff0.ErrIncompressible } @@ -349,19 +355,19 @@ func (b *blockEnc) encodeLits(raw bool) error { switch err { case huff0.ErrIncompressible: if debug { - println("Adding RAW block, length", len(b.literals), "last:", b.last) + println("Adding RAW block, length", len(lits), "last:", b.last) } bh.setType(blockTypeRaw) b.output = bh.appendTo(b.output) - b.output = append(b.output, b.literals...) + b.output = append(b.output, lits...) 
return nil case huff0.ErrUseRLE: if debug { - println("Adding RLE block, length", len(b.literals)) + println("Adding RLE block, length", len(lits)) } bh.setType(blockTypeRLE) b.output = bh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) + b.output = append(b.output, lits[0]) return nil default: return err @@ -384,7 +390,7 @@ func (b *blockEnc) encodeLits(raw bool) error { lh.setType(literalsBlockCompressed) } // Set sizes - lh.setSizes(len(out), len(b.literals), single) + lh.setSizes(len(out), len(lits), single) bh.setSize(uint32(len(out) + lh.size() + 1)) // Write block headers. @@ -444,13 +450,19 @@ func fuzzFseEncoder(data []byte) int { } // encode will encode the block and append the output in b.output. -func (b *blockEnc) encode(raw, rawAllLits bool) error { +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { - return b.encodeLits(rawAllLits) + return b.encodeLits(b.literals, rawAllLits) } - // We want some difference - if len(b.literals) > (b.size - (b.size >> 5)) { - return errIncompressible + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) } var bh blockHeader @@ -466,6 +478,11 @@ func (b *blockEnc) encode(raw, rawAllLits bool) error { reUsed, single bool err error ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } if len(b.literals) >= 1024 && !raw { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 66b51bf2d..d78be6d42 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -187,7 +187,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.err = err d.current.flushed = true if debug { - println("sync decode to ", len(dst), "bytes, err:", err) + println("sync decode to", len(dst), "bytes, err:", err) } return nil } @@ -303,6 +303,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.history.reset() err := frame.reset(&frame.bBuf) if err == io.EOF { + if debug { + println("frame reset return EOF") + } return dst, nil } if frame.DictionaryID != nil { @@ -341,6 +344,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { return dst, err } if len(frame.bBuf) == 0 { + if debug { + println("frame dbuf empty") + } break } } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8eb6f6ba3..fa25a18d8 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -13,14 +13,31 @@ import ( type dict struct { id uint32 - litDec *huff0.Scratch + litEnc *huff0.Scratch llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte } var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. 
+func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -43,10 +60,11 @@ func loadDict(b []byte) (*dict, error) { // Read literal table var err error - d.litDec, b, err = huff0.ReadTable(b[8:], nil) + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { return nil, err } + d.litEnc.Reuse = huff0.ReusePolicyMust br := byteReader{ b: b, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..b1b7c6e6a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,155 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int) int32 { + if size > 0 && size < int(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + l := e.maxMatchOff * 2 + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } else { + if cap(e.hist) < int(e.maxMatchOff*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { + // Extend the match to be as long as possible. 
+ return int32(matchLen(src[s:], src[t:])) +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { + l := e.maxMatchOff*2 + int32(d.DictContentSize()) + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index c120d9054..94a5343d0 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -31,8 +31,10 @@ type prevEntry struct { // and that it is longer (lazy matching). type betterFastEncoder struct { fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry + dictTable []tableEntry + dictLongTable []prevEntry } // Encode improves compression... 
@@ -516,3 +518,78 @@ encodeLoop: func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { e.Encode(blk, src) } + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash5(cv, hashLog) // 0 -> 4 + nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 + nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 + nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 50276bcde..19eebf66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -18,7 +18,8 @@ const ( type doubleFastEncoder struct { fastEncoder - longTable [dFastLongTableSize]tableEntry + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry } // Encode mimmics functionality in zstd_dfast.c @@ -494,7 +495,7 @@ encodeLoop: // but the likelihood of both the first 4 bytes and the hash matching should be enough. t = candidateL.offset - e.cur if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + panic(fmt.Sprintf("s (%d) <= t (%d). 
cur: %d", s, t, e.cur)) } if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") @@ -676,3 +677,37 @@ encodeLoop: e.cur += int32(len(src)) } } + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + } + // Reset table to initial state + e.cur = e.maxMatchOff + copy(e.longTable[:], e.dictLongTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 4104b456c..0b301df43 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -8,8 +8,6 @@ import ( "fmt" "math" "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" ) const ( @@ -24,51 +22,10 @@ type tableEntry struct { offset int32 } -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc -} - type fastEncoder struct { fastBase - table [tableSize]tableEntry -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. -func (e *fastBase) Block() *blockEnc { - return e.blk + table [tableSize]tableEntry + dictTable []tableEntry } // Encode mimmics functionality in zstd_fast.c @@ -660,96 +617,45 @@ encodeLoop: } } -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - l := e.maxMatchOff * 2 - // Make it at least 1MB. 
- if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) - } else { - if cap(e.hist) < int(e.maxMatchOff*2) { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash6(cv, hashLog) // 0 -> 5 + nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 + nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } } + e.lastDictID = d.id } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) Reset(singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) { - l := e.maxMatchOff * 2 - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) - } - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go deleted file mode 100644 index d874116f7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_params.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -/* -// encParams are not really used, just here for reference. -type encParams struct { - // largest match distance : larger == more compression, more memory needed during decompression - windowLog uint8 - - // fully searched segment : larger == more compression, slower, more memory (useless for fast) - chainLog uint8 - - // dispatch table : larger == faster, more memory - hashLog uint8 - - // < nb of searches : larger == more compression, slower - searchLog uint8 - - // < match length searched : larger == faster decompression, sometimes less compression - minMatch uint8 - - // acceptable match size for optimal parser (only) : larger == more compression, slower - targetLength uint32 - - // see ZSTD_strategy definition above - strategy strategy -} - -// strategy defines the algorithm to use when generating sequences. -type strategy uint8 - -const ( - // Compression strategies, listed from fastest to strongest - strategyFast strategy = iota + 1 - strategyDfast - strategyGreedy - strategyLazy - strategyLazy2 - strategyBtlazy2 - strategyBtopt - strategyBtultra - strategyBtultra2 - // note : new strategies _might_ be added in the future. - // Only the order (from fast to strong) is guaranteed - -) - -var defEncParams = [4][]encParams{ - { // "default" - for any srcSize > 256 KB - // W, C, H, S, L, TL, strat - {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels - {19, 13, 14, 1, 7, 0, strategyFast}, // level 1 - {20, 15, 16, 1, 6, 0, strategyFast}, // level 2 - {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3 - {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4 - {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5 - {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6 - {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7 - {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8 - {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9 - {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10 - {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11 - {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12 - {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13 - {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14 - {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15 - {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16 - {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17 - {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18 - {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19 - {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20 - {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21 - {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22 - }, - { // for srcSize <= 256 KB - // W, C, H, S, L, T, strat - {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels - {18, 13, 14, 1, 6, 0, strategyFast}, // level 1 - {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2 - {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3 - {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4. - {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5. - {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6. - {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7 - {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8 - {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9 - {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10 - {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11. - {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12. - {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13 - {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14. - {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15. 
- {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16. - {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17. - {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18. - {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19. - {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20. - {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21. - {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22. - }, - { // for srcSize <= 128 KB - // W, C, H, S, L, T, strat - {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels - {17, 12, 13, 1, 6, 0, strategyFast}, // level 1 - {17, 13, 15, 1, 5, 0, strategyFast}, // level 2 - {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3 - {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4 - {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5 - {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6 - {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7 - {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8 - {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9 - {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10 - {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11 - {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12 - {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13. - {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14. - {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15. - {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16. - {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17. - {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18. - {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19. - {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20. - {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21. - {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22. - }, - { // for srcSize <= 16 KB - // W, C, H, S, L, T, strat - {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels - {14, 14, 15, 1, 5, 0, strategyFast}, // level 1 - {14, 14, 15, 1, 4, 0, strategyFast}, // level 2 - {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3 - {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4 - {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5. - {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6 - {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7 - {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8. - {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9. - {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10. - {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11. - {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12. - {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13. - {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14. - {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15. - {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16. - {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17. - {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18. - {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19. - {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20. - {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21. - {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22. 
- }, -} -*/ diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 95ebc3d84..f5759211d 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -35,7 +35,7 @@ type encoder interface { AppendCRC([]byte) []byte WindowSize(size int) int32 UseBlock(*blockEnc) - Reset(singleBlock bool) + Reset(d *dict, singleBlock bool) } type encoderState struct { @@ -83,8 +83,6 @@ func (e *Encoder) initialize() { e.encoders = make(chan encoder, e.o.concurrent) for i := 0; i < e.o.concurrent; i++ { enc := e.o.encoder() - // If not single block, history will be allocated on first use. - enc.Reset(true) e.encoders <- enc } } @@ -115,7 +113,7 @@ func (e *Encoder) Reset(w io.Writer) { s.filling = s.filling[:0] s.current = s.current[:0] s.previous = s.previous[:0] - s.encoder.Reset(false) + s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false s.fullFrameWritten = false @@ -200,8 +198,9 @@ func (e *Encoder) nextBlock(final bool) error { WindowSize: uint32(s.encoder.WindowSize(0)), SingleSegment: false, Checksum: e.o.crc, - DictID: 0, + DictID: e.o.dict.ID(), } + dst, err := fh.appendTo(tmp[:0]) if err != nil { return err @@ -281,7 +280,7 @@ func (e *Encoder) nextBlock(final bool) error { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) } switch err { case errIncompressible: @@ -311,7 +310,13 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { if debug { println("Using ReadFrom") } - // Maybe handle stuff queued? + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } e.state.filling = e.state.filling[:e.o.blockSize] src := e.state.filling for { @@ -328,7 +333,7 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { if debug { println("ReadFrom: got EOF final block:", len(e.state.filling)) } - return n, e.nextBlock(true) + return n, nil default: if debug { println("ReadFrom: got error:", err) @@ -450,7 +455,6 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { defer func() { // Release encoder reference to last block. // If a non-single block is needed the encoder will reset again. - enc.Reset(true) e.encoders <- enc }() // Use single segments when above minimum window and below 1MB. @@ -463,7 +467,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { WindowSize: uint32(enc.WindowSize(len(src))), SingleSegment: single, Checksum: e.o.crc, - DictID: 0, + DictID: e.o.dict.ID(), } // If less than 1MB, allocate a buffer up front. @@ -477,13 +481,18 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we can do everything in one block, prefer that. if len(src) <= maxCompressedBlockSize { + enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { _, _ = enc.CRC().Write(src) } blk := enc.Block() blk.last = true - enc.EncodeNoHist(blk, src) + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } // If we got the exact same number of literals as input, // assume the literals cannot be compressed. 
@@ -492,7 +501,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(blk.literals) != len(src) || len(src) != e.o.blockSize { // Output directly to dst blk.output = dst - err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) } switch err { @@ -508,7 +517,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } blk.output = oldout } else { - enc.Reset(false) + enc.Reset(e.o.dict, false) blk := enc.Block() for len(src) > 0 { todo := src @@ -519,7 +528,6 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if e.o.crc { _, _ = enc.CRC().Write(todo) } - blk.reset(nil) blk.pushOffsets() enc.Encode(blk, todo) if len(src) == 0 { @@ -529,7 +537,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) } switch err { @@ -544,6 +552,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { default: panic(err) } + blk.reset(nil) } } if e.o.crc { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index dfac14ddd..579206163 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -24,6 +24,7 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + dict *dict } func (o *encoderOptions) setDefault() { @@ -265,3 +266,16 @@ func WithSingleSegment(b bool) EOption { return nil } } + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// The encoder *may* choose to use no dictionary instead for certain payloads. +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 4479cfe18..4ef7f5a3e 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -5,6 +5,7 @@ package zstd import ( + "encoding/binary" "fmt" "io" "math" @@ -16,7 +17,7 @@ type frameHeader struct { WindowSize uint32 SingleSegment bool Checksum bool - DictID uint32 // Not stored. 
+ DictID uint32 } const maxHeaderSize = 14 @@ -30,6 +31,24 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { if f.SingleSegment { fhd |= 1 << 5 } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } var fcs uint8 if f.ContentSize >= 256 { fcs++ @@ -40,6 +59,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { if f.ContentSize >= 0xffffffff { fcs++ } + fhd |= fcs << 6 dst = append(dst, fhd) @@ -48,7 +68,9 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 dst = append(dst, uint8(windowLog)) } - + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } switch fcs { case 0: if f.SingleSegment { diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f418f50fc..f783e32d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -37,7 +37,7 @@ func (h *history) reset() { } h.decoders = sequenceDecs{} if h.huffTree != nil { - if h.dict == nil || h.dict.litDec != h.huffTree { + if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) } } @@ -55,7 +55,7 @@ func (h *history) setDict(dict *dict) { h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec h.recentOffsets = dict.offsets - h.huffTree = dict.litDec + h.huffTree = dict.litEnc } // append bytes to history. diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 7ff870400..b5c8ef133 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -196,6 +196,10 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { s.literals = s.literals[ll:] out := s.out + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if mo > len(s.out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) @@ -218,10 +222,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - // Copy from history. // TODO: Blocks without history could be made to ignore this completely. 
if v := mo - len(s.out); v > 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 690428cd2..841fd95ac 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { // Add empty last block r.block.reset(nil) r.block.last = true - err := r.block.encodeLits(false) + err := r.block.encodeLits(r.block.literals, false) if err != nil { return written, err } @@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.err = ErrSnappyCorrupt return written, r.err } - err = r.block.encode(false, false) + err = r.block.encode(nil, false, false) switch err { case errIncompressible: r.block.popOffsets() @@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { println("snappy.Decode:", err) return written, err } - err = r.block.encodeLits(false) + err = r.block.encodeLits(r.block.literals, false) if err != nil { return written, err } @@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.err = ErrSnappyCorrupt return written, r.err } - err := r.block.encodeLits(false) + err := r.block.encodeLits(r.block.literals, false) if err != nil { return written, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 9dc56b08c..7af0f1110 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,7 +159,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.23.4 +# github.com/containers/storage v1.23.5 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -330,7 +330,7 @@ github.com/inconshreveable/mousetrap github.com/ishidawataru/sctp # github.com/json-iterator/go v1.1.10 github.com/json-iterator/go -# github.com/klauspost/compress v1.10.11 +# github.com/klauspost/compress v1.11.0 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -701,7 +701,7 @@ gopkg.in/yaml.v3 # k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/api/apps/v1 k8s.io/api/core/v1 -# k8s.io/apimachinery v0.19.0 +# k8s.io/apimachinery v0.19.1 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 |
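Circling back to the register-mask constants added to `compress/flate` near the top of this change (`regmask_other.go` and its amd64 counterpart): they exist so a variable shift count can be masked with the register width on amd64, where the hardware already reduces the count mod the register size, while the non-amd64 values degenerate to all ones so the mask is a no-op. A minimal sketch of the pattern, using a local stand-in constant rather than the library's unexported ones:

```Go
package main

import "fmt"

// Local stand-in for the flate constants: 63 corresponds to the amd64
// build (shift counts are reduced mod 64 by the hardware anyway); the
// !amd64 variant would use an all-ones mask so the expression is unchanged.
const reg64SizeMask64 = 63

// shift64 assumes n is always < 64. Masking the count documents that
// assumption and, on amd64, lets the compiler emit a plain shift without
// the extra "count >= width" handling Go's shift semantics otherwise require.
func shift64(v uint64, n uint) uint64 {
	return v << (n & reg64SizeMask64)
}

func main() {
	fmt.Println(shift64(1, 12)) // 4096
}
```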