36 files changed, 732 insertions, 256 deletions
diff --git a/.github/workflows/multi-arch-build.yaml b/.github/workflows/multi-arch-build.yaml index 0f8a3df7e..2ade44ee6 100644 --- a/.github/workflows/multi-arch-build.yaml +++ b/.github/workflows/multi-arch-build.yaml @@ -2,6 +2,11 @@ # Please see contrib/podmanimage/README.md for details on the intentions # of this workflow. +# +# BIG FAT WARNING: This workflow is duplicated across containers/skopeo, +# containers/buildah, and containers/podman. ANY AND +# ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED +# TO THE OTHER REPOS. name: build multi-arch images @@ -86,7 +91,7 @@ jobs: docker://$PODMAN_QUAY_REGISTRY/stable | \ jq -r '.Tags[]') - # New image? Push quay.io/podman/stable:vX.X.X and :latest + # New version? Push quay.io/podman/stable:vX.X.X and :latest if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then # Assume version-tag is also the most up to date (i.e. "latest") FQIN="$PODMAN_QUAY_REGISTRY/stable:$VERSION,$PODMAN_QUAY_REGISTRY/stable:latest" @@ -108,31 +113,24 @@ jobs: echo "::set-output name=fqin::${FQIN}" echo '::set-output name=push::true' - # This is substantially the same as the above step, except the - # $CONTAINERS_QUAY_REGISTRY is used and the "testing" - # flavor is never pushed. + # This is substantially similar to the above logic, + # but only handles $CONTAINERS_QUAY_REGISTRY for + # the stable "latest" and named-version tagged images. - name: Generate containers reg. image FQIN(s) - if: matrix.source != 'testing' + if: matrix.source == 'stable' id: containers_reg run: | - if [[ "${{ matrix.source }}" == 'stable' ]]; then - VERSION='v${{ steps.sniff_test.outputs.version }}' - # workaround vim syntax-highlight bug: ' - ALLTAGS=$(skopeo list-tags \ - docker://$CONTAINERS_QUAY_REGISTRY/podman | \ - jq -r '.Tags[]') - - # New image? Push quay.io/containers/podman:vX.X.X and :latest - if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then - FQIN="$CONTAINERS_QUAY_REGISTRY/podman:$VERSION,$CONTAINERS_QUAY_REGISTRY/podman:latest" - else # Not a new version-tagged image, but contents may be updated - FQIN="$CONTAINERS_QUAY_REGISTRY/podman:latest" - fi - elif [[ "${{ matrix.source }}" == 'upstream' ]]; then + VERSION='v${{ steps.sniff_test.outputs.version }}' + # workaround vim syntax-highlight bug: ' + ALLTAGS=$(skopeo list-tags \ + docker://$CONTAINERS_QUAY_REGISTRY/podman | \ + jq -r '.Tags[]') + + # New version? Push quay.io/containers/podman:vX.X.X and latest + if ! fgrep -qx "$VERSION" <<<"$ALLTAGS"; then + FQIN="$CONTAINERS_QUAY_REGISTRY/podman:$VERSION,$CONTAINERS_QUAY_REGISTRY/podman:latest" + else # Not a new version-tagged image, only update latest. FQIN="$CONTAINERS_QUAY_REGISTRY/podman:latest" - else - echo "::error::Unknown matrix item '${{ matrix.source }}'" - exit 1 fi echo "::warning::Pushing $FQIN" echo "::set-output name=fqin::${FQIN}" diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go index 77ac781a5..76d7345fc 100644 --- a/cmd/podman/common/create_opts.go +++ b/cmd/podman/common/create_opts.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + "github.com/containers/common/pkg/config" "github.com/containers/podman/v3/cmd/podman/registry" "github.com/containers/podman/v3/pkg/api/handlers" "github.com/containers/podman/v3/pkg/cgroups" @@ -140,7 +141,7 @@ func stringMaptoArray(m map[string]string) []string { // ContainerCreateToContainerCLIOpts converts a compat input struct to cliopts so it can be converted to // a specgen spec. 
-func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroupsManager string) (*ContainerCLIOpts, []string, error) { +func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*ContainerCLIOpts, []string, error) { var ( capAdd []string cappDrop []string @@ -248,7 +249,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup } // netMode - nsmode, _, err := specgen.ParseNetworkNamespace(string(cc.HostConfig.NetworkMode)) + nsmode, _, err := specgen.ParseNetworkNamespace(string(cc.HostConfig.NetworkMode), true) if err != nil { return nil, nil, err } @@ -507,7 +508,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup cliOpts.Restart = policy } - if cc.HostConfig.MemorySwappiness != nil && (!rootless.IsRootless() || rootless.IsRootless() && cgroupsv2 && cgroupsManager == "systemd") { + if cc.HostConfig.MemorySwappiness != nil && (!rootless.IsRootless() || rootless.IsRootless() && cgroupsv2 && rtc.Engine.CgroupManager == "systemd") { cliOpts.MemorySwappiness = *cc.HostConfig.MemorySwappiness } else { cliOpts.MemorySwappiness = -1 diff --git a/cmd/podman/common/netflags.go b/cmd/podman/common/netflags.go index 4d0a554a6..9941bc716 100644 --- a/cmd/podman/common/netflags.go +++ b/cmd/podman/common/netflags.go @@ -201,7 +201,7 @@ func NetFlagsToNetOptions(cmd *cobra.Command) (*entities.NetOptions, error) { parts := strings.SplitN(network, ":", 2) - ns, cniNets, err := specgen.ParseNetworkNamespace(network) + ns, cniNets, err := specgen.ParseNetworkNamespace(network, containerConfig.Containers.RootlessNetworking == "cni") if err != nil { return nil, err } @@ -17,7 +17,7 @@ require ( github.com/containers/image/v5 v5.12.0 github.com/containers/ocicrypt v1.1.1 github.com/containers/psgo v1.5.2 - github.com/containers/storage v1.32.0 + github.com/containers/storage v1.32.1 github.com/coreos/go-systemd/v22 v22.3.2 github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3 github.com/cri-o/ocicni v0.2.1-0.20210301205850-541cf7c703cf @@ -43,7 +43,7 @@ require ( github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 github.com/mrunalp/fileutils v0.5.0 github.com/onsi/ginkgo v1.16.2 - github.com/onsi/gomega v1.12.0 + github.com/onsi/gomega v1.13.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/runc v1.0.0-rc95 @@ -236,8 +236,9 @@ github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzP github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM= github.com/containers/storage v1.30.1/go.mod h1:NDJkiwxnSHD1Is+4DGcyR3SIEYSDOa0xnAW+uGQFx9E= github.com/containers/storage v1.31.1/go.mod h1:IFEf+yRTS0pvCGQt2tBv1Kzz2XUSPvED6uFBmWG7V/E= -github.com/containers/storage v1.32.0 h1:l2O+EybfGVkisqDkRysKG1VAO6jPPIYOV5Q4/sau86c= github.com/containers/storage v1.32.0/go.mod h1:J3q772EVbN9vgqoN/dkvInKnp4xK9ZXm7wHNfuiIDgE= +github.com/containers/storage v1.32.1 h1:JgvHY5dokiff+Ee4TdvPYO++Oq2BAave5DmyPetH2iU= +github.com/containers/storage v1.32.1/go.mod h1:do6oIF71kfkVS3CPUZr+6He94fIaj6pzF8ywevPuuOw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -531,8 +532,9 @@ github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -656,8 +658,9 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8= github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= diff --git a/libpod/container.go b/libpod/container.go index 591cf9bc5..c6f0cd618 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -14,7 +14,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/podman/v3/libpod/define" "github.com/containers/podman/v3/libpod/lock" - "github.com/containers/podman/v3/pkg/rootless" "github.com/containers/storage" "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -1168,7 +1167,7 @@ func (c *Container) Networks() ([]string, bool, error) { func (c *Container) networks() ([]string, bool, error) { networks, err := c.runtime.state.GetNetworks(c) if err != nil && errors.Cause(err) == define.ErrNoSuchNetwork { - if len(c.config.Networks) == 0 && !rootless.IsRootless() { + if len(c.config.Networks) == 0 && c.config.NetMode.IsBridge() { return []string{c.runtime.netPlugin.GetDefaultNetworkName()}, true, nil } return c.config.Networks, false, nil diff --git a/libpod/runtime.go b/libpod/runtime.go index e551e6fe8..d775b55e1 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -17,6 +17,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/defaultnet" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/pkg/sysregistriesv2" is "github.com/containers/image/v5/storage" @@ -458,6 +459,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { } } + // If we need to make a default network - do so 
now. + if err := defaultnet.Create(runtime.config.Network.DefaultNetwork, runtime.config.Network.DefaultSubnet, runtime.config.Network.NetworkConfigDir, runtime.config.Engine.StaticDir, runtime.config.Engine.MachineEnabled); err != nil { + logrus.Errorf("Failed to created default CNI network: %v", err) + } + // Set up the CNI net plugin netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...) if err != nil { @@ -709,9 +715,8 @@ func (r *Runtime) libimageEvents() { return status } + eventChannel := r.libimageRuntime.EventChannel() go func() { - eventChannel := r.libimageRuntime.EventChannel() - for { // Make sure to read and write all events before // checking if we're about to shutdown. diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 4e4b2a8ab..6c69d1b72 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -581,6 +581,15 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo if err := c.stop(c.StopTimeout()); err != nil && errors.Cause(err) != define.ErrConmonDead { return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID()) } + + // We unlocked as part of stop() above - there's a chance someone + // else got in and removed the container before we reacquired the + // lock. + // Do a quick ping of the database to check if the container + // still exists. + if ok, _ := r.state.HasContainer(c.ID()); !ok { + return nil + } } // Remove all active exec sessions diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go index 162a98135..8e9e1fb39 100644 --- a/pkg/api/handlers/compat/containers_create.go +++ b/pkg/api/handlers/compat/containers_create.go @@ -62,7 +62,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) { } // Take body structure and convert to cliopts - cliOpts, args, err := common.ContainerCreateToContainerCLIOpts(body, rtc.Engine.CgroupManager) + cliOpts, args, err := common.ContainerCreateToContainerCLIOpts(body, rtc) if err != nil { utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "make cli opts()")) return diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go index 269a2a2da..0bd711c90 100644 --- a/pkg/machine/qemu/machine.go +++ b/pkg/machine/qemu/machine.go @@ -408,7 +408,7 @@ func (v *MachineVM) SSH(name string, opts machine.SSHOptions) error { sshDestination := v.RemoteUsername + "@localhost" port := strconv.Itoa(v.Port) - args := []string{"-i", v.IdentityPath, "-p", port, sshDestination} + args := []string{"-i", v.IdentityPath, "-p", port, sshDestination, "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no"} if len(opts.Args) > 0 { args = append(args, opts.Args...) 
} else { diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go index 278f35c22..f41186ae4 100644 --- a/pkg/specgen/generate/namespaces.go +++ b/pkg/specgen/generate/namespaces.go @@ -66,7 +66,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod) case "cgroup": return specgen.ParseCgroupNamespace(cfg.Containers.CgroupNS) case "net": - ns, _, err := specgen.ParseNetworkNamespace(cfg.Containers.NetNS) + ns, _, err := specgen.ParseNetworkNamespace(cfg.Containers.NetNS, cfg.Containers.RootlessNetworking == "cni") return ns, err } diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go index f665fc0be..80852930a 100644 --- a/pkg/specgen/namespaces.go +++ b/pkg/specgen/namespaces.go @@ -253,7 +253,7 @@ func ParseUserNamespace(ns string) (Namespace, error) { // ParseNetworkNamespace parses a network namespace specification in string // form. // Returns a namespace and (optionally) a list of CNI networks to join. -func ParseNetworkNamespace(ns string) (Namespace, []string, error) { +func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, []string, error) { toReturn := Namespace{} var cniNetworks []string // Net defaults to Slirp on rootless @@ -264,7 +264,11 @@ func ParseNetworkNamespace(ns string) (Namespace, []string, error) { toReturn.NSMode = FromPod case ns == "" || ns == string(Default) || ns == string(Private): if rootless.IsRootless() { - toReturn.NSMode = Slirp + if rootlessDefaultCNI { + toReturn.NSMode = Bridge + } else { + toReturn.NSMode = Slirp + } } else { toReturn.NSMode = Bridge } diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index 37e837b1d..696cec76c 100644 --- a/test/e2e/run_networking_test.go +++ b/test/e2e/run_networking_test.go @@ -786,4 +786,18 @@ var _ = Describe("Podman run networking", func() { Expect(session.ExitCode()).To(BeZero()) Expect(session.OutputToString()).To(ContainSubstring("search dns.podman")) }) + + It("Rootless podman run with --net=bridge works and connects to default network", func() { + // This is harmless when run as root, so we'll just let it run. 
+ ctrName := "testctr" + ctr := podmanTest.Podman([]string{"run", "-d", "--net=bridge", "--name", ctrName, ALPINE, "top"}) + ctr.WaitWithDefaultTimeout() + Expect(ctr.ExitCode()).To(BeZero()) + + inspectOut := podmanTest.InspectContainer(ctrName) + Expect(len(inspectOut)).To(Equal(1)) + Expect(len(inspectOut[0].NetworkSettings.Networks)).To(Equal(1)) + _, ok := inspectOut[0].NetworkSettings.Networks["podman"] + Expect(ok).To(BeTrue()) + }) }) diff --git a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go new file mode 100644 index 000000000..9b32241d6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go @@ -0,0 +1,222 @@ +package defaultnet + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// TODO: A smarter implementation would make sure cni-podman0 was unused before +// making the default, and adjust if necessary +const networkTemplate = `{ + "cniVersion": "0.4.0", + "name": "{{{{.Name}}}}", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "{{{{.Subnet}}}}", + "gateway": "{{{{.Gateway}}}}" + } + ] + ] + } + }, +{{{{- if (eq .Machine true) }}}} + { + "type": "podman-machine", + "capabilities": { + "portMappings": true + } + }, +{{{{- end}}}} + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] +} +` + +var ( + // Borrowed from Podman, modified to remove dashes and periods. + nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$") +) + +// Used to pass info into the template engine +type networkInfo struct { + Name string + Subnet string + Gateway string + Machine bool +} + +// The most trivial definition of a CNI network possible for our use here. +// We need the name, and nothing else. +type network struct { + Name string `json:"name"` +} + +// Create makes the CNI default network, if necessary. +// Accepts the name and subnet of the network to create (a standard template +// will be used, with these values plugged in), the configuration directory +// where CNI configs are stored (to verify if a named configuration already +// exists), an exists directory (where a sentinel file will be stored, to ensure +// the network is only made once), and an isMachine bool (to determine whether +// the machine block will be added to the config). +// Create first checks if a default network has already been created via the +// presence of a sentinel file. If it does exist, it returns immediately without +// error. +// It next checks if a CNI network with the given name already exists. In that +// case, it creates the sentinel file and returns without error. +// If neither of these are true, the default network is created. +func Create(name, subnet, configDir, existsDir string, isMachine bool) error { + // TODO: Should probably regex name to make sure it's valid. 
+ if name == "" || subnet == "" || configDir == "" || existsDir == "" { + return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork") + } + if !nameRegex.MatchString(name) { + return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name) + } + + sentinelFile := filepath.Join(existsDir, "defaultCNINetExists") + + // Check if sentinel file exists, return immediately if it does. + if _, err := os.Stat(sentinelFile); err == nil { + return nil + } + + // Create the sentinel file if it doesn't exist, so subsequent checks + // don't need to go further. + file, err := os.Create(sentinelFile) + if err != nil { + return err + } + file.Close() + + // We may need to make the config dir. + if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) { + return errors.Wrapf(err, "error creating CNI configuration directory") + } + + // Check all networks in the CNI conflist. + files, err := ioutil.ReadDir(configDir) + if err != nil { + return errors.Wrapf(err, "error reading CNI configuration directory") + } + if len(files) > 0 { + configPaths := make([]string, 0, len(files)) + for _, path := range files { + if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" { + configPaths = append(configPaths, filepath.Join(configDir, path.Name())) + } + } + for _, config := range configPaths { + configName, err := getConfigName(config) + if err != nil { + logrus.Errorf("Error reading CNI configuration file: %v", err) + continue + } + if configName == name { + return nil + } + } + } + + // We need to make the config. + // Get subnet and gateway. + _, ipNet, err := net.ParseCIDR(subnet) + if err != nil { + return errors.Wrapf(err, "default network subnet %s is invalid", subnet) + } + + ones, bits := ipNet.Mask.Size() + if ones == bits { + return errors.Wrapf(err, "default network subnet %s is to small", subnet) + } + gateway := make(net.IP, len(ipNet.IP)) + // copy the subnet ip to the gateway so we can modify it + copy(gateway, ipNet.IP) + // the default gateway should be the first ip in the subnet + gateway[len(gateway)-1]++ + + netInfo := new(networkInfo) + netInfo.Name = name + netInfo.Gateway = gateway.String() + netInfo.Subnet = ipNet.String() + netInfo.Machine = isMachine + + templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate) + if err != nil { + return errors.Wrapf(err, "error compiling template for default network") + } + var output bytes.Buffer + if err := templ.Execute(&output, netInfo); err != nil { + return errors.Wrapf(err, "error executing template for default network") + } + + // Next, we need to place the config on disk. + // Loop through possible indexes, with a limit of 100 attempts. + created := false + for i := 87; i < 187; i++ { + configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) + if err != nil { + logrus.Infof("Attempt to create default CNI network config file failed: %v", err) + continue + } + defer configFile.Close() + + created = true + + // Success - file is open. Write our buffer to it. + if _, err := configFile.Write(output.Bytes()); err != nil { + return errors.Wrapf(err, "error writing default CNI config to file") + } + break + } + if !created { + return errors.Errorf("no available default network configuration file was found") + } + + return nil +} + +// Get the name of the configuration contained in a given conflist file. 
Accepts +// the full path of a .conflist CNI configuration. +func getConfigName(file string) (string, error) { + contents, err := ioutil.ReadFile(file) + if err != nil { + return "", err + } + config := new(network) + if err := json.Unmarshal(contents, config); err != nil { + return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file)) + } + return config.Name, nil +} diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 359c41089..96cd6ee1e 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.32.0 +1.32.1 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 6407e9a83..b4f773f2b 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -626,5 +626,5 @@ func (r *containerStore) ReloadIfChanged() error { if err == nil && modified { return r.Load() } - return nil + return err } diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 3d720cde2..e7ca56e64 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -10,7 +10,7 @@ require ( github.com/google/go-intervals v0.0.2 github.com/hashicorp/go-multierror v1.1.1 github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.12.2 + github.com/klauspost/compress v1.12.3 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-shellwords v1.0.11 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index 91403a201..5373d0597 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -383,8 +383,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8= -github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index c7b968e05..bca25a65b 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -810,5 +810,5 @@ func (r *imageStore) ReloadIfChanged() error { if err == nil && modified { return r.Load() } - return nil + return err } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 1ed265d5d..9b05474bb 100644 --- a/vendor/github.com/containers/storage/layers.go +++ 
b/vendor/github.com/containers/storage/layers.go @@ -1156,48 +1156,51 @@ func (r *layerStore) deleteInternal(id string) error { } id = layer.ID err := r.driver.Remove(id) - if err == nil { - os.Remove(r.tspath(id)) - os.RemoveAll(r.datadir(id)) - delete(r.byid, id) - for _, name := range layer.Names { - delete(r.byname, name) + if err != nil { + return err + } + + os.Remove(r.tspath(id)) + os.RemoveAll(r.datadir(id)) + delete(r.byid, id) + for _, name := range layer.Names { + delete(r.byname, name) + } + r.idindex.Delete(id) + mountLabel := layer.MountLabel + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + r.deleteInDigestMap(id) + toDeleteIndex := -1 + for i, candidate := range r.layers { + if candidate.ID == id { + toDeleteIndex = i + break } - r.idindex.Delete(id) - mountLabel := layer.MountLabel - if layer.MountPoint != "" { - delete(r.bymount, layer.MountPoint) + } + if toDeleteIndex != -1 { + // delete the layer at toDeleteIndex + if toDeleteIndex == len(r.layers)-1 { + r.layers = r.layers[:len(r.layers)-1] + } else { + r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) } - r.deleteInDigestMap(id) - toDeleteIndex := -1 - for i, candidate := range r.layers { - if candidate.ID == id { - toDeleteIndex = i + } + if mountLabel != "" { + var found bool + for _, candidate := range r.layers { + if candidate.MountLabel == mountLabel { + found = true break } } - if toDeleteIndex != -1 { - // delete the layer at toDeleteIndex - if toDeleteIndex == len(r.layers)-1 { - r.layers = r.layers[:len(r.layers)-1] - } else { - r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) - } - } - if mountLabel != "" { - var found bool - for _, candidate := range r.layers { - if candidate.MountLabel == mountLabel { - found = true - break - } - } - if !found { - label.ReleaseLabel(mountLabel) - } + if !found { + label.ReleaseLabel(mountLabel) } } - return err + + return nil } func (r *layerStore) deleteInDigestMap(id string) { @@ -1777,7 +1780,7 @@ func (r *layerStore) ReloadIfChanged() error { if err == nil && modified { return r.Load() } - return nil + return err } func closeAll(closes ...func() error) (rErr error) { diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 759407c63..d6d547c64 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -788,6 +788,15 @@ func (s *store) load() error { } s.containerStore = rcs + for _, store := range driver.AdditionalImageStores() { + gipath := filepath.Join(store, driverPrefix+"images") + ris, err := newROImageStore(gipath) + if err != nil { + return err + } + s.roImageStores = append(s.roImageStores, ris) + } + s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { return err @@ -910,22 +919,10 @@ func (s *store) ImageStore() (ImageStore, error) { // Store. Accessing these stores directly will bypass locking and // synchronization, so it is not a part of the exported Store interface. 
func (s *store) ROImageStores() ([]ROImageStore, error) { - if len(s.roImageStores) != 0 { - return s.roImageStores, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err - } - driverPrefix := s.graphDriverName + "-" - for _, store := range driver.AdditionalImageStores() { - gipath := filepath.Join(store, driverPrefix+"images") - ris, err := newROImageStore(gipath) - if err != nil { - return nil, err - } - s.roImageStores = append(s.roImageStores, ris) + if s.imageStore == nil { + return nil, ErrLoadError } + return s.roImageStores, nil } @@ -2655,8 +2652,13 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { return "", err } + modified, err := s.graphLock.Modified() + if err != nil { + return "", err + } + /* We need to make sure the home mount is present when the Mount is done. */ - if s.graphLock.TouchedSince(s.lastLoaded) { + if modified { s.graphDriver = nil s.layerStore = nil s.graphDriver, err = s.getGraphDriver() diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 40b5802de..5283ac5a5 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -644,7 +644,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: - d.w.logNewTablePenalty = 8 + d.w.logNewTablePenalty = 10 d.window = make([]byte, 32<<10) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 678f08105..347ac2c90 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -45,7 +45,7 @@ const ( bTableBits = 17 // Bits used in the big tables bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 10 // Size to preallocate for history. + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. ) diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index db54be139..3ad5e9807 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -6,6 +6,7 @@ package flate import ( "encoding/binary" + "fmt" "io" ) @@ -27,7 +28,7 @@ const ( // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 240 + bufferFlushSize = 246 // bufferSize is the actual output byte buffer size. 
// It must have additional headroom for a flush @@ -59,19 +60,31 @@ var offsetExtraBits = [64]int8{ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, } -var offsetBase = [64]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, +var offsetCombined = [32]uint32{} - /* extended window */ - 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, - 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, - 0x100000, 0x180000, 0x200000, 0x300000, +func init() { + var offsetBase = [64]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + } } // The odd order in which the codegen code sizes are written. @@ -88,15 +101,16 @@ type huffmanBitWriter struct { bits uint64 nbits uint16 nbytes uint8 + lastHuffMan bool literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder offsetEncoding *huffmanEncoder codegenEncoding *huffmanEncoder err error lastHeader int // Set between 0 (reused block can be up to 2x the size) logNewTablePenalty uint - lastHuffMan bool - bytes [256]byte + bytes [256 + 8]byte literalFreq [lengthCodesStart + 32]uint16 offsetFreq [32]uint16 codegenFreq [codegenCodeCount]uint16 @@ -128,6 +142,7 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { return &huffmanBitWriter{ writer: w, literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), codegenEncoding: newHuffmanEncoder(codegenCodeCount), offsetEncoding: newHuffmanEncoder(offsetCodeCount), } @@ -745,9 +760,31 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) offs := oeCodes[:32] lengths := leCodes[lengthCodesStart:] lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. 
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + for _, t := range tokens { if t < matchType { - w.writeCode(lits[t.literal()]) + //w.writeCode(lits[t.literal()]) + c := lits[t.literal()] + bits |= uint64(c.code) << nbits + nbits += c.len + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } continue } @@ -759,38 +796,99 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } else { // inlined c := lengths[lengthCode&31] - w.bits |= uint64(c.code) << w.nbits - w.nbits += c.len - if w.nbits >= 48 { - w.writeOutBits() + bits |= uint64(c.code) << nbits + nbits += c.len + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } } } extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) if extraLengthBits > 0 { + //w.writeBits(extraLength, extraLengthBits) extraLength := int32(length - lengthBase[lengthCode&31]) - w.writeBits(extraLength, extraLengthBits) + bits |= uint64(extraLength) << nbits + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } } // Write the offset offset := t.offset() - offsetCode := offsetCode(offset) + offsetCode := offset >> 16 + offset &= matchOffsetOnlyMask if false { w.writeCode(offs[offsetCode&31]) } else { // inlined - c := offs[offsetCode&31] - w.bits |= uint64(c.code) << w.nbits - w.nbits += c.len - if w.nbits >= 48 { - w.writeOutBits() + c := offs[offsetCode] + bits |= uint64(c.code) << nbits + nbits += c.len + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } } } - extraOffsetBits := uint16(offsetExtraBits[offsetCode&63]) - if extraOffsetBits > 0 { - extraOffset := int32(offset - offsetBase[offsetCode&63]) - w.writeBits(extraOffset, extraOffsetBits) + offsetComb := offsetCombined[offsetCode] + if offsetComb > 1<<16 { + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits + nbits += uint16(offsetComb >> 16) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } } } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + if deferEOB { w.writeCode(leCodes[endBlockMarker]) } @@ -825,13 +923,28 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { } } + // Fill is rarely better... 
+ const fill = false + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + // Add everything as literals // We have to estimate the header size. // Assume header is around 70 bytes: // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 - estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) - estBits += w.lastHeader + len(input)/32 + histogram(input, w.literalFreq[:numLiterals], fill) + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + if fill { + // Clear fill... + for i := range w.literalFreq[:numLiterals] { + w.literalFreq[i] = 0 + } + histogram(input, w.literalFreq[:numLiterals], false) + } + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + estBits += w.lastHeader if w.lastHeader == 0 { estBits += guessHeaderSizeBits } @@ -839,33 +952,31 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Store bytes, if we don't get a reasonable improvement. ssize, storable := w.storedSize(input) - if storable && ssize < estBits { + if storable && ssize <= estBits { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return } - reuseSize := 0 if w.lastHeader > 0 { - reuseSize = w.literalEncoding.bitLength(w.literalFreq[:256]) + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) if estBits < reuseSize { + if debugDeflate { + //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) } } - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 + count := 0 if w.lastHeader == 0 { - if !eof && !sync { - // Generate a slightly suboptimal tree that can be used for all. - fillHist(w.literalFreq[:numLiterals]) - } - w.literalFreq[endBlockMarker] = 1 - w.literalEncoding.generate(w.literalFreq[:numLiterals], 15) - + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding // Generate codegen and codegenFrequencies, which indicates how to encode // the literalEncoding and the offsetEncoding. w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) @@ -876,34 +987,47 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) w.lastHuffMan = true w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } } - encoding := w.literalEncoding.codes[:257] + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. 
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << w.nbits - w.nbits += c.len - if w.nbits >= 48 { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - if n >= bufferFlushSize { + bits |= uint64(c.code) << nbits + nbits += c.len + if debugDeflate { + count += int(c.len) + } + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { if w.err != nil { - n = 0 + nbytes = 0 return } - w.write(w.bytes[:n]) - n = 0 + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 } - w.nbytes = n } } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + fmt.Println("wrote", count/8, "bytes") + } if eof || sync { - w.writeCode(encoding[endBlockMarker]) + w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 w.lastHuffMan = false } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 0d3445a1c..67b2b3872 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -21,9 +21,13 @@ type hcode struct { } type huffmanEncoder struct { - codes []hcode - freqcache []literalNode - bitCount [17]int32 + codes []hcode + bitCount [17]int32 + + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. + // The largest of these is literalCount, so we allocate for that case. + freqcache [literalCount + 1]literalNode } type literalNode struct { @@ -132,6 +136,21 @@ func (h *huffmanEncoder) bitLengthRaw(b []byte) int { return total } +// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused. +func (h *huffmanEncoder) canReuseBits(freq []uint16) int { + var total int + for i, f := range freq { + if f != 0 { + code := h.codes[i] + if code.len == 0 { + return math.MaxInt32 + } + total += int(f) * int(code.len) + } + } + return total +} + // Return the number of literals assigned to each bit size in the Huffman encoding // // This method is only called when list.length >= 3 @@ -291,12 +310,6 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // freq An array of frequencies, in which frequency[i] gives the frequency of literal i. // maxBits The maximum number of bits to use for any literal. func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - if h.freqcache == nil { - // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. - // The largest of these is literalCount, so we allocate for that case. - h.freqcache = make([]literalNode, literalCount+1) - } list := h.freqcache[:len(freq)+1] // Number of non-zero literals count := 0 @@ -330,10 +343,14 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { h.assignEncodingAndSize(bitCount, list) } +// atLeastOne clamps the result between 1 and 15. func atLeastOne(v float32) float32 { if v < 1 { return 1 } + if v > 15 { + return 15 + } return v } @@ -346,31 +363,12 @@ func fillHist(b []uint16) { } } -// histogramSize accumulates a histogram of b in h. 
-// An estimated size in bits is returned. -// len(h) must be >= 256, and h's elements must be all zeroes. -func histogramSize(b []byte, h []uint16, fill bool) (bits int) { +func histogram(b []byte, h []uint16, fill bool) { h = h[:256] for _, t := range b { h[t]++ } - total := len(b) if fill { - for _, v := range h { - if v == 0 { - total++ - } - } + fillHist(h) } - - invTotal := 1.0 / float32(total) - shannon := float32(0.0) - for _, v := range h { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } - } - - return int(shannon + 0.99) } diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index f9abf606d..eb862d7a9 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,14 +13,17 @@ import ( ) const ( + // From top // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal - lengthShift = 22 - offsetMask = 1<<lengthShift - 1 - typeMask = 3 << 30 - literalType = 0 << 30 - matchType = 1 << 30 + // 5 bits offsetcode + // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + lengthShift = 22 + offsetMask = 1<<lengthShift - 1 + typeMask = 3 << 30 + literalType = 0 << 30 + matchType = 1 << 30 + matchOffsetOnlyMask = 0xffff ) // The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH) @@ -187,7 +190,7 @@ func (t *tokens) indexTokens(in []token) { t.AddLiteral(tok.literal()) continue } - t.AddMatch(uint32(tok.length()), tok.offset()) + t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) } } @@ -232,7 +235,7 @@ func (t *tokens) EstimatedBits() int { for _, v := range t.litHist[:] { if v > 0 { n := float32(v) - shannon += -mFastLog2(n*invTotal) * n + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n } } // Just add 15 for EOB @@ -240,7 +243,7 @@ func (t *tokens) EstimatedBits() int { for i, v := range t.extraHist[1 : literalCount-256] { if v > 0 { n := float32(v) - shannon += -mFastLog2(n*invTotal) * n + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n bits += int(lengthExtraBits[i&31]) * int(v) nMatches += int(v) } @@ -251,7 +254,7 @@ func (t *tokens) EstimatedBits() int { for i, v := range t.offHist[:offsetCodeCount] { if v > 0 { n := float32(v) - shannon += -mFastLog2(n*invTotal) * n + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n bits += int(offsetExtraBits[i&31]) * int(v) } } @@ -270,11 +273,13 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { panic(fmt.Errorf("invalid offset: %v", xoffset)) } } + oCode := offsetCode(xoffset) + xoffset |= oCode << 16 t.nLits++ - lengthCode := lengthCodes1[uint8(xlength)] & 31 + + t.extraHist[lengthCodes1[uint8(xlength)]]++ + t.offHist[oCode]++ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset) - t.extraHist[lengthCode]++ - t.offHist[offsetCode(xoffset)&31]++ t.n++ } @@ -286,7 +291,8 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { panic(fmt.Errorf("invalid offset: %v", xoffset)) } } - oc := offsetCode(xoffset) & 31 + oc := offsetCode(xoffset) + xoffset |= oc << 16 for xlength > 0 { xl := xlength if xl > 258 { @@ -294,12 +300,11 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { xl = 258 - baseMatchLength } xlength -= xl - xl -= 3 + xl -= baseMatchLength t.nLits++ - lengthCode := lengthCodes1[uint8(xl)] & 31 - t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) - 
t.extraHist[lengthCode]++ + t.extraHist[lengthCodes1[uint8(xl)]]++ t.offHist[oc]++ + t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) t.n++ } } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index e7d7eb0ae..787813fa9 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -16,8 +16,7 @@ Currently the package is heavily optimized for 64 bit processors and will be sig Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. -Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd - +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) ## Compressor @@ -405,13 +404,28 @@ BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 This reflects the performance around May 2020, but this may be out of date. +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. + +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + # Contributions Contributions are always welcome. For new features/fixes, remember to add tests and for performance enhancements include benchmarks. -For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. - For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index b51d922bd..6cea054d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -123,12 +123,10 @@ func newBlockDec(lowMem bool) *blockDec { // Input must be a start of a block and will be at the end of the block when returned. 
func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.WindowSize = windowSize - tmp := br.readSmall(3) - if tmp == nil { - if debug { - println("Reading block header:", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err } bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) b.Last = bh&1 != 0 @@ -179,7 +177,6 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { if cap(b.dst) <= maxSize { b.dst = make([]byte, 0, maxSize+1) } - var err error b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debug { diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 658ef7838..17e820a6a 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -12,8 +12,8 @@ import ( type byteBuffer interface { // Read up to 8 bytes. - // Returns nil if no more input is available. - readSmall(n int) []byte + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) // Read >8 bytes. // MAY use the destination slice. @@ -29,17 +29,17 @@ type byteBuffer interface { // in-memory buffer type byteBuf []byte -func (b *byteBuf) readSmall(n int) []byte { +func (b *byteBuf) readSmall(n int) ([]byte, error) { if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) } bb := *b if len(bb) < n { - return nil + return nil, io.ErrUnexpectedEOF } r := bb[:n] *b = bb[n:] - return r + return r, nil } func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { @@ -81,19 +81,22 @@ type readerWrapper struct { tmp [8]byte } -func (r *readerWrapper) readSmall(n int) []byte { +func (r *readerWrapper) readSmall(n int) ([]byte, error) { if debugAsserts && n > 8 { panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) } n2, err := io.ReadFull(r.r, r.tmp[:n]) // We only really care about the actual bytes read. 
- if n2 != n { + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } if debug { println("readSmall: got", n2, "want", n, "err", err) } - return nil + return nil, err } - return r.tmp[:n] + return r.tmp[:n], nil } func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 693c5f05d..4dc151213 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -80,9 +80,14 @@ func (d *frameDec) reset(br byteBuffer) error { d.WindowSize = 0 var b []byte for { - b = br.readSmall(4) - if b == nil { + var err error + b, err = br.readSmall(4) + switch err { + case io.EOF, io.ErrUnexpectedEOF: return io.EOF + default: + return err + case nil: } if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { if debug { @@ -92,14 +97,14 @@ func (d *frameDec) reset(br byteBuffer) error { break } // Read size to skip - b = br.readSmall(4) - if b == nil { - println("Reading Frame Size EOF") - return io.ErrUnexpectedEOF + b, err = br.readSmall(4) + if err != nil { + println("Reading Frame Size", err) + return err } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err := br.skipN(int(n)) + err = br.skipN(int(n)) if err != nil { if debug { println("Reading discarded frame", err) @@ -147,12 +152,11 @@ func (d *frameDec) reset(br byteBuffer) error { if size == 3 { size = 4 } - b = br.readSmall(int(size)) - if b == nil { - if debug { - println("Reading Dictionary_ID", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF + + b, err = br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err } var id uint32 switch size { @@ -187,10 +191,10 @@ func (d *frameDec) reset(br byteBuffer) error { } d.FrameContentSize = 0 if fcsSize > 0 { - b := br.readSmall(fcsSize) - if b == nil { - println("Reading Frame content", io.ErrUnexpectedEOF) - return io.ErrUnexpectedEOF + b, err = br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err } switch fcsSize { case 1: @@ -307,10 +311,10 @@ func (d *frameDec) checkCRC() error { tmp[3] = byte(got >> 24) // We can overwrite upper tmp now - want := d.rawInput.readSmall(4) - if want == nil { - println("CRC missing?") - return io.ErrUnexpectedEOF + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } if !bytes.Equal(tmp[:], want) { diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index e35a0a2f8..9325b928a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -13,8 +13,9 @@ import ( // See https://www.winzip.com/win/en/comp_info.html const ZipMethodWinZip = 93 -// ZipMethodPKWare is the method number used by PKWARE to indicate Zstandard compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.7.TXT +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. 
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 var zipReaderPool sync.Pool diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index b05f3a935..4783c0d43 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.13.0 + +### Features +- gmeasure provides BETA support for benchmarking (#447) [8f2dfbf] +- Set consistently and eventually defaults on init (#443) [12eb778] + ## 1.12.0 ### Features diff --git a/vendor/github.com/onsi/gomega/env.go b/vendor/github.com/onsi/gomega/env.go new file mode 100644 index 000000000..62fd885a9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/env.go @@ -0,0 +1,40 @@ +package gomega + +import ( + "os" + + "github.com/onsi/gomega/internal/defaults" +) + +const ( + ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" + ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT" + EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL" +) + +func init() { + defaults.SetDurationFromEnv( + os.Getenv, + SetDefaultConsistentlyDuration, + ConsistentlyDurationEnvVarName, + ) + + defaults.SetDurationFromEnv( + os.Getenv, + SetDefaultConsistentlyPollingInterval, + ConsistentlyPollingIntervalEnvVarName, + ) + + defaults.SetDurationFromEnv( + os.Getenv, + SetDefaultEventuallyTimeout, + EventuallyTimeoutEnvVarName, + ) + + defaults.SetDurationFromEnv( + os.Getenv, + SetDefaultEventuallyPollingInterval, + EventuallyPollingIntervalEnvVarName, + ) +} diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 9050e15e8..a05b34b27 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.12.0" +const GOMEGA_VERSION = "1.13.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/internal/defaults/env.go b/vendor/github.com/onsi/gomega/internal/defaults/env.go new file mode 100644 index 000000000..bc29c63d5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/defaults/env.go @@ -0,0 +1,22 @@ +package defaults + +import ( + "fmt" + "time" +) + +func SetDurationFromEnv(getDurationFromEnv func(string) string, varSetter func(time.Duration), name string) { + durationFromEnv := getDurationFromEnv(name) + + if len(durationFromEnv) == 0 { + return + } + + duration, err := time.ParseDuration(durationFromEnv) + + if err != nil { + panic(fmt.Sprintf("Expected a duration when using %s! 
Parse error %v", name, err)) + } + + varSetter(duration) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b4c2c6330..879bb2759 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -102,6 +102,7 @@ github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/chown github.com/containers/common/pkg/completion github.com/containers/common/pkg/config +github.com/containers/common/pkg/defaultnet github.com/containers/common/pkg/filters github.com/containers/common/pkg/manifests github.com/containers/common/pkg/parse @@ -192,7 +193,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.32.0 +# github.com/containers/storage v1.32.1 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -394,7 +395,7 @@ github.com/json-iterator/go # github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a github.com/juju/ansiterm github.com/juju/ansiterm/tabwriter -# github.com/klauspost/compress v1.12.2 +# github.com/klauspost/compress v1.12.3 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -475,13 +476,14 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.12.0 +# github.com/onsi/gomega v1.13.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gbytes github.com/onsi/gomega/gexec github.com/onsi/gomega/internal/assertion github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/defaults github.com/onsi/gomega/internal/oraclematcher github.com/onsi/gomega/internal/testingtsupport github.com/onsi/gomega/matchers |
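For reference, a minimal, self-contained sketch (not part of the commit) of how the two API changes in this diff fit together from a caller's point of view: defaultnet.Create writes a default CNI conflist if none exists yet, and specgen.ParseNetworkNamespace now takes a second boolean selecting the rootless default mode (bridge when rootless networking is "cni", otherwise slirp). The network name, subnet, and directory paths below are illustrative assumptions, not values taken from this diff.

package main

import (
	"log"

	"github.com/containers/common/pkg/defaultnet"
	"github.com/containers/podman/v3/pkg/specgen"
)

func main() {
	// Create the default CNI network config if it is missing.
	// Arguments: network name, subnet, CNI config dir, sentinel dir, machine flag.
	// (The concrete values here are assumptions for illustration only.)
	if err := defaultnet.Create("podman", "10.88.0.0/16",
		"/etc/cni/net.d", "/var/lib/containers/storage/libpod", false); err != nil {
		log.Printf("failed to create default CNI network: %v", err)
	}

	// Resolve the network namespace mode for an empty (default) specification.
	// The new second argument makes bridge the rootless default when CNI is in use.
	ns, cniNets, err := specgen.ParseNetworkNamespace("", true)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("namespace mode: %s, networks: %v", ns.NSMode, cniNets)
}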