Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containers/common/libimage/runtime.go | 54
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/config.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/config.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 5
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/types.go | 9
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 97
-rw-r--r--  vendor/github.com/containers/image/v5/copy/progress_reader.go | 26
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go | 69
-rw-r--r--  vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go | 47
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go | 74
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go | 118
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go | 34
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go | 26
-rw-r--r--  vendor/github.com/containers/image/v5/internal/private/private.go | 106
-rw-r--r--  vendor/github.com/containers/image/v5/internal/types/types.go | 91
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go | 119
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go | 21
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_image.go | 176
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 4
-rw-r--r--  vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md | 3
-rw-r--r--  vendor/github.com/containers/libtrust/SECURITY.md | 3
-rw-r--r--  vendor/github.com/godbus/dbus/v5/auth.go | 10
-rw-r--r--  vendor/github.com/godbus/dbus/v5/conn.go | 27
-rw-r--r--  vendor/github.com/godbus/dbus/v5/conn_other.go | 9
-rw-r--r--  vendor/github.com/godbus/dbus/v5/dbus.go | 14
-rw-r--r--  vendor/github.com/godbus/dbus/v5/doc.go | 10
-rw-r--r--  vendor/github.com/godbus/dbus/v5/escape.go | 84
-rw-r--r--  vendor/github.com/godbus/dbus/v5/export.go | 19
-rw-r--r--  vendor/github.com/godbus/dbus/v5/homedir.go | 29
-rw-r--r--  vendor/github.com/godbus/dbus/v5/homedir_dynamic.go | 15
-rw-r--r--  vendor/github.com/godbus/dbus/v5/homedir_static.go | 45
-rw-r--r--  vendor/github.com/godbus/dbus/v5/message.go | 16
-rw-r--r--  vendor/github.com/godbus/dbus/v5/server_interfaces.go | 2
-rw-r--r--  vendor/github.com/godbus/dbus/v5/sig.go | 2
-rw-r--r--  vendor/github.com/godbus/dbus/v5/transport_unix.go | 12
-rw-r--r--  vendor/github.com/godbus/dbus/v5/transport_zos.go | 6
-rw-r--r--  vendor/github.com/godbus/dbus/v5/variant.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 28
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 166
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go | 40
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go | 222
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate_gen.go | 657
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level1.go | 56
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level3.go | 45
-rw-r--r--  vendor/github.com/klauspost/compress/flate/token.go | 17
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 333
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go | 4
-rw-r--r--  vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go | 3
-rw-r--r--  vendor/github.com/sylabs/sif/v2/LICENSE.md | 2
-rw-r--r--  vendor/github.com/xeipuuv/gojsonpointer/README.md | 2
-rw-r--r--  vendor/golang.org/x/term/codereview.cfg | 1
-rw-r--r--  vendor/golang.org/x/term/term.go | 2
-rw-r--r--  vendor/modules.txt | 25
60 files changed, 1564 insertions(+), 1482 deletions(-)
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 86e7eee56..559a9a6a6 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -190,6 +190,8 @@ type LookupImageOptions struct {
returnManifestIfNoInstance bool
}
+var errNoHexValue = errors.New("invalid format: no 64-byte hexadecimal value")
+
// LookupImage looks up `name` in the local container storage. Returns the
// image and the name it has been found with. Note that name may also use the
// `containers-storage:` prefix used to refer to the containers-storage
@@ -233,13 +235,29 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
name = normalizedName
}
+ byDigest := false
originalName := name
- idByDigest := false
if strings.HasPrefix(name, "sha256:") {
- // Strip off the sha256 prefix so it can be parsed later on.
- idByDigest = true
+ byDigest = true
name = strings.TrimPrefix(name, "sha256:")
}
+ byFullID := reference.IsFullIdentifier(name)
+
+ if byDigest && !byFullID {
+ return nil, "", fmt.Errorf("%s: %v", originalName, errNoHexValue)
+ }
+
+ // If the name clearly refers to a local image, try to look it up.
+ if byFullID || byDigest {
+ img, err := r.lookupImageInLocalStorage(originalName, name, options)
+ if err != nil {
+ return nil, "", err
+ }
+ if img != nil {
+ return img, originalName, nil
+ }
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
+ }
// Unless specified, set the platform specified in the system context
// for later platform matching. Builder likes to set these things via
@@ -256,27 +274,11 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64").
options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant)
- // First, check if we have an exact match in the storage. Maybe an ID
- // or a fully-qualified image name.
- img, err := r.lookupImageInLocalStorage(name, name, options)
- if err != nil {
- return nil, "", err
- }
- if img != nil {
- return img, originalName, nil
- }
-
- // If the name clearly referred to a local image, there's nothing we can
- // do anymore.
- if storageRef != nil || idByDigest {
- return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
- }
-
// Second, try out the candidates as resolved by shortnames. This takes
// "localhost/" prefixed images into account as well.
candidates, err := shortnames.ResolveLocally(&r.systemContext, name)
if err != nil {
- return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
+ return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
}
// Backwards compat: normalize to docker.io as some users may very well
// rely on that.
@@ -294,7 +296,17 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
}
}
- return r.lookupImageInDigestsAndRepoTags(originalName, options)
+ // The specified name may refer to a short ID. Note that this *must*
+ // happen after the short-name expansion as done above.
+ img, err := r.lookupImageInLocalStorage(name, name, options)
+ if err != nil {
+ return nil, "", err
+ }
+ if img != nil {
+ return img, name, err
+ }
+
+ return r.lookupImageInDigestsAndRepoTags(name, options)
}
// lookupImageInLocalStorage looks up the specified candidate for name in the
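The rewritten lookup logic above splits on three cases: a `sha256:`-prefixed digest, a full 64-hex-character image ID, and everything else. A minimal sketch of that classification, assuming the anchored regexp mirrors `docker/reference`'s `IdentifierRegexp` (`[a-f0-9]{64}`); names here are illustrative, not the library's API:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// assumption: mirrors docker/reference's IdentifierRegexp ([a-f0-9]{64}), anchored
var anchoredIdentifier = regexp.MustCompile(`^[a-f0-9]{64}$`)

func classify(name string) string {
	byDigest := strings.HasPrefix(name, "sha256:")
	trimmed := strings.TrimPrefix(name, "sha256:")
	byFullID := anchoredIdentifier.MatchString(trimmed)
	switch {
	case byDigest && !byFullID:
		return "error: no 64-byte hexadecimal value"
	case byDigest || byFullID:
		return "local-storage lookup only; ErrImageUnknown if absent"
	default:
		return "short-name resolution, then short-ID fallback"
	}
}

func main() {
	for _, n := range []string{"sha256:abc", "alpine", strings.Repeat("a", 64)} {
		fmt.Printf("%q -> %s\n", n, classify(n))
	}
}
```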
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go
index b5877879c..e801e1469 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/config.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/config.go
@@ -187,9 +187,6 @@ func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) {
}
func createIPMACVLAN(network *types.Network) error {
- if network.Internal {
- return errors.New("internal is not supported with macvlan")
- }
if network.NetworkInterface != "" {
interfaceNames, err := internalutil.GetLiveNetworkNames()
if err != nil {
@@ -201,6 +198,9 @@ func createIPMACVLAN(network *types.Network) error {
}
if len(network.Subnets) == 0 {
network.IPAMOptions["driver"] = types.DHCPIPAMDriver
+ if network.Internal {
+ return errors.New("internal is not supported with macvlan and dhcp ipam driver")
+ }
} else {
network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index 12e7140cc..d42062927 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -142,9 +142,6 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
}
func createMacvlan(network *types.Network) error {
- if network.Internal {
- return errors.New("internal is not supported with macvlan")
- }
if network.NetworkInterface != "" {
interfaceNames, err := internalutil.GetLiveNetworkNames()
if err != nil {
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index f4d85bb76..a1d6f259a 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -406,6 +406,10 @@ type EngineConfig struct {
// before sending kill signal.
StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"`
+ // ExitCommandDelay is the number of seconds to wait for the exit
+	// command to be sent to the API process on the server.
+ ExitCommandDelay uint `toml:"exit_command_delay,omitempty,omitzero"`
+
// ImageCopyTmpDir is the default location for storing temporary
// container image content, Can be overridden with the TMPDIR
// environment variable. If you specify "storage", then the
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 2c4d3733c..03de59943 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -515,6 +515,11 @@ default_sysctls = [
#
#stop_timeout = 10
+# Number of seconds to wait for the exit command to be sent to the API process on the server.
+# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (the value is in seconds).
+#
+#exit_command_delay = 300
+
# map of service destinations
#
#[service_destinations]
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 5f40aeb7c..e4344e8be 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -95,6 +95,10 @@ var (
parseSubnetPool("10.96.0.0/11", 24),
parseSubnetPool("10.128.0.0/9", 24),
}
+	// additionalHelperBinariesDir is an extra helper-binaries directory that
+	// should be set at link time if a packager installs its helper binaries
+	// in a non-default location
+ additionalHelperBinariesDir string
)
// nolint:unparam
@@ -277,6 +281,9 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
c.HelperBinariesDir = defaultHelperBinariesDir
+ if additionalHelperBinariesDir != "" {
+ c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir)
+ }
c.HooksDir = DefaultHooksDirs
c.ImageDefaultTransport = _defaultTransport
c.StateType = BoltDBStateStore
@@ -286,6 +293,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.CgroupManager = defaultCgroupManager()
c.ServiceTimeout = uint(5)
c.StopTimeout = uint(10)
+ c.ExitCommandDelay = uint(5 * 60)
c.NetworkCmdOptions = []string{
"enable_ipv6=true",
}
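The `additionalHelperBinariesDir` variable above is designed to be injected at link time with the Go linker's `-X` flag, e.g. `go build -ldflags "-X github.com/containers/common/pkg/config.additionalHelperBinariesDir=/usr/libexec/podman"` (`-X` works on uninitialized package-level string vars, exported or not). A minimal, self-contained sketch of the pattern; the variable name and directories are illustrative:

```go
package main

import "fmt"

// set via: go build -ldflags "-X main.extraHelperDir=/usr/libexec/podman"
var extraHelperDir string

func helperBinariesDirs() []string {
	// built-in defaults (illustrative values)
	dirs := []string{"/usr/local/libexec/podman", "/usr/libexec/podman"}
	if extraHelperDir != "" {
		dirs = append(dirs, extraHelperDir)
	}
	return dirs
}

func main() {
	fmt.Println(helperBinariesDirs())
}
```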
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
index 0c022ac7a..d2498747c 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
@@ -1,3 +1,4 @@
+//go:build seccomp
// +build seccomp
// SPDX-License-Identifier: Apache-2.0
@@ -120,6 +121,13 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error)
return nil, err
}
+ for _, flag := range config.Flags {
+ newConfig.Flags = append(newConfig.Flags, specs.LinuxSeccompFlag(flag))
+ }
+
+ newConfig.ListenerPath = config.ListenerPath
+ newConfig.ListenerMetadata = config.ListenerMetadata
+
if len(config.ArchMap) != 0 {
for _, a := range config.ArchMap {
seccompArch, ok := nativeToSeccomp[arch]
diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go
index a8a9e9d4f..784b1ba8d 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/types.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/types.go
@@ -14,9 +14,12 @@ type Seccomp struct {
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
- Architectures []Arch `json:"architectures,omitempty"`
- ArchMap []Architecture `json:"archMap,omitempty"`
- Syscalls []*Syscall `json:"syscalls"`
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+ Flags []string `json:"flags,omitempty"`
+ ListenerPath string `json:"listenerPath,omitempty"`
+ ListenerMetadata string `json:"listenerMetadata,omitempty"`
}
// Architecture is used to represent a specific architecture
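The three new `Seccomp` fields carry through to the OCI runtime-spec seccomp config (filter flags plus the notify-listener socket and its metadata). A small sketch of a profile fragment using the JSON tags defined above, unmarshalled into a local mirror of the struct; the flag value and socket path are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// local mirror of the new fields on seccomp.Seccomp
type profile struct {
	Flags            []string `json:"flags,omitempty"`
	ListenerPath     string   `json:"listenerPath,omitempty"`
	ListenerMetadata string   `json:"listenerMetadata,omitempty"`
}

func main() {
	data := []byte(`{
		"flags": ["SECCOMP_FILTER_FLAG_LOG"],
		"listenerPath": "/run/seccomp-agent.sock",
		"listenerMetadata": "opaque-agent-data"
	}`)
	var p profile
	if err := json.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p)
}
```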
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 512e643b9..0501fb3c1 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -15,8 +15,10 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/imagedestination"
+ "github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/pkg/platform"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
@@ -63,8 +65,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- dest types.ImageDestination
- rawSource types.ImageSource
+ dest private.ImageDestination
+ rawSource private.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
@@ -202,20 +204,22 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
reportWriter = options.ReportWriter
}
- dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
+ publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
if err != nil {
return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
}
+ dest := imagedestination.FromPublic(publicDest)
defer func() {
if err := dest.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (dest: %v)", err)
}
}()
- rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
+ publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
if err != nil {
return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
}
+ rawSource := imagesource.FromPublic(publicRawSource)
defer func() {
if err := rawSource.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (src: %v)", err)
@@ -1225,28 +1229,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
- var (
- blobInfo types.BlobInfo
- reused bool
- err error
- )
- // Note: the storage destination optimizes the committing of
- // layers which requires passing the index of the layer.
- // Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.TryReusingBlobOptions{
- Cache: ic.c.blobInfoCache,
- CanSubstitute: ic.canSubstituteBlobs,
- SrcRef: srcRef,
- EmptyLayer: emptyLayer,
- }
- options.LayerIndex = &layerIndex
- reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
- } else {
- reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
- }
-
+ reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ CanSubstitute: ic.canSubstituteBlobs,
+ EmptyLayer: emptyLayer,
+ LayerIndex: &layerIndex,
+ SrcRef: srcRef,
+ })
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
}
@@ -1286,9 +1275,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// of the source file are not known yet and must be fetched.
// Attempt a partial only when the source allows to retrieve a blob partially and
// the destination has support for it.
- imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
- imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
- if okSource && okDest && !diffIDIsNeeded {
+ if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
@@ -1296,29 +1283,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
bar.Abort(hideProgressBar)
}()
- progress := make(chan int64)
- terminate := make(chan interface{})
-
- defer close(terminate)
- defer close(progress)
-
- proxy := imageSourceSeekableProxy{
- source: imgSource,
- progress: progress,
+ proxy := blobChunkAccessorProxy{
+ wrapped: ic.c.rawSource,
+ bar: bar,
}
- go func() {
- for {
- select {
- case written := <-progress:
- bar.IncrInt64(written)
- case <-terminate:
- return
- }
- }
- }()
-
bar.SetTotal(srcInfo.Size, false)
- info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
+ info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
bar.SetRefill(srcInfo.Size - bar.Current())
bar.SetCurrent(srcInfo.Size)
@@ -1658,24 +1628,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- var uploadedInfo types.BlobInfo
- // Note: the storage destination optimizes the committing of layers
- // which requires passing the index of the layer. Hence, we need to
- // special case and cast.
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
- if ok {
- options := internalTypes.PutBlobOptions{
- Cache: c.blobInfoCache,
- IsConfig: isConfig,
- EmptyLayer: emptyLayer,
- }
- if !isConfig {
- options.LayerIndex = &layerIndex
- }
- uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
- } else {
- uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
+ options := private.PutBlobOptions{
+ Cache: c.blobInfoCache,
+ IsConfig: isConfig,
+ EmptyLayer: emptyLayer,
+ }
+ if !isConfig {
+ options.LayerIndex = &layerIndex
}
+ uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
}
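The `FromPublic` calls above let the copy pipeline program uniformly against the internal `private.ImageSource`/`private.ImageDestination` interfaces: a transport that already implements the extended API is used directly, and anything else is wrapped with stub extensions. A minimal sketch of that "upgrade or wrap" pattern, with illustrative stand-in names:

```go
package main

import "fmt"

// Public is a stand-in for types.ImageSource / types.ImageDestination.
type Public interface{ Name() string }

// Private is a stand-in for the internal extended interface.
type Private interface {
	Public
	SupportsFastPath() bool
}

// wrapped embeds the public API and adds stub implementations of the extensions.
type wrapped struct{ Public }

func (w *wrapped) SupportsFastPath() bool { return false }

// FromPublic returns p directly if it already implements Private, else wraps it.
func FromPublic(p Public) Private {
	if p2, ok := p.(Private); ok {
		return p2
	}
	return &wrapped{Public: p}
}

type plain struct{}

func (plain) Name() string { return "plain" }

func main() {
	fmt.Println(FromPublic(plain{}).SupportsFastPath()) // false: stub from the wrapper
}
```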
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go
index 42f490d32..de23cec1b 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go
@@ -5,8 +5,9 @@ import (
"io"
"time"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
+ "github.com/vbauerster/mpb/v7"
)
// progressReader is a reader that reports its progress on an interval.
@@ -80,25 +81,26 @@ func (r *progressReader) Read(p []byte) (int, error) {
return n, err
}
-// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes
// are received.
-type imageSourceSeekableProxy struct {
- // source is the seekable input to read from.
- source internalTypes.ImageSourceSeekable
- // progress is the chan where the total number of bytes read so far are reported.
- progress chan int64
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *mpb.Bar // A progress bar updated with the number of bytes read so far
}
-// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received
-// to the progress chan.
-func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks)
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
if err == nil {
total := int64(0)
for _, c := range chunks {
total += int64(c.Length)
}
- s.progress <- total
+ s.bar.IncrInt64(total)
}
return rc, errs, err
}
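The old channel-plus-goroutine progress plumbing is replaced by a plain decorator: the proxy forwards `GetBlobAt` and bumps the progress bar inline. The same counting-decorator pattern over `io.Reader`, as a minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader decorates an io.Reader and tallies bytes read,
// the way blobChunkAccessorProxy calls bar.IncrInt64 per chunk.
type countingReader struct {
	wrapped io.Reader
	total   int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.wrapped.Read(p)
	c.total += int64(n)
	return n, err
}

func main() {
	cr := &countingReader{wrapped: strings.NewReader("some blob data")}
	if _, err := io.Copy(io.Discard, cr); err != nil {
		panic(err)
	}
	fmt.Println("read", cr.total, "bytes")
}
```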
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 314e9b394..cb520d670 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -16,7 +16,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
- internalTypes "github.com/containers/image/v5/internal/types"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
@@ -147,6 +147,11 @@ func (s *dockerImageSource) Close() error {
return nil
}
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *dockerImageSource) SupportsGetBlobAt() bool {
+ return true
+}
+
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
@@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
}
// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
-func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
defer close(streams)
defer close(errs)
currentOffset := uint64(0)
@@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
}
// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan.
-func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
defer close(streams)
defer close(errs)
if !strings.HasPrefix(mediaType, "multipart/") {
@@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
}
}
-// GetBlobAt returns a stream for the specified blob.
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
// The specified chunks must not overlap and must be sorted by their offset.
-func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
var rangeVals []string
@@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
return streams, errs, nil
case http.StatusBadRequest:
res.Body.Close()
- return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status}
+ return nil, nil, private.BadPartialRequestError{Status: res.Status}
default:
err := httpResponseToError(res, "Error fetching partial blob")
if err == nil {
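On the happy path, `GetBlobAt` above requests all chunks with a single HTTP `Range` header, building each inclusive byte range as `offset-(offset+length-1)` (see `rangeVals` in the hunk). A minimal sketch of that header construction, with illustrative chunk values:

```go
package main

import (
	"fmt"
	"strings"
)

type chunk struct{ Offset, Length uint64 }

// rangeHeader joins chunks into one Range header value.
func rangeHeader(chunks []chunk) string {
	vals := make([]string, 0, len(chunks))
	for _, c := range chunks {
		// HTTP byte ranges are inclusive: offset..offset+length-1
		vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
	}
	return "bytes=" + strings.Join(vals, ",")
}

func main() {
	fmt.Println(rangeHeader([]chunk{{0, 100}, {200, 50}}))
	// Output: bytes=0-99,200-249
}
```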
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
new file mode 100644
index 000000000..7b15871f7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
@@ -0,0 +1,6 @@
+package reference
+
+// Return true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+ return anchoredIdentifierRegexp.MatchString(s)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
new file mode 100644
index 000000000..82734a6cd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -0,0 +1,69 @@
+package imagedestination
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+ if dest2, ok := dest.(private.ImageDestination); ok {
+ return dest2
+ }
+ return &wrapped{ImageDestination: dest}
+}
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+ types.ImageDestination
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (w *wrapped) SupportsPutBlobPartial() bool {
+ return false
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+ return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name())
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
new file mode 100644
index 000000000..fe1be8d9e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -0,0 +1,47 @@
+package imagesource
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+)
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+ if src2, ok := src.(private.ImageSource); ok {
+ return src2
+ }
+ return &wrapped{ImageSource: src}
+}
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+ types.ImageSource
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (w *wrapped) SupportsGetBlobAt() bool {
+ return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
deleted file mode 100644
index bf6cc87d4..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// Key represents a single key linked to one or more kernel keyrings.
-type Key struct {
- Name string
-
- id, ring keyID
- size int
-}
-
-// ID returns the 32-bit kernel identifier for a specific key
-func (k *Key) ID() int32 {
- return int32(k.id)
-}
-
-// Get the key's value as a byte slice
-func (k *Key) Get() ([]byte, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
-
- if k.size == 0 {
- k.size = 512
- }
-
- size := k.size
-
- b = make([]byte, int(size))
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- k.size = sizeRead
- }
- }
- return b[:k.size], err
-}
-
-// Unlink a key from the keyring it was loaded from (or added to). If the key
-// is not linked to any other keyrings, it is destroyed.
-func (k *Key) Unlink() error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
- return err
-}
-
-// Describe returns a string describing the attributes of a specified key
-func (k *Key) Describe() (string, error) {
- keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
- if err != nil {
- return "", err
- }
- return keyAttr, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
deleted file mode 100644
index 5eaad615c..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-package keyctl
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Keyring is the basic interface to a linux keyctl keyring.
-type Keyring interface {
- ID
- Add(string, []byte) (*Key, error)
- Search(string) (*Key, error)
-}
-
-type keyring struct {
- id keyID
-}
-
-// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
-type ID interface {
- ID() int32
-}
-
-// Add a new key to a keyring. The key can be searched for later by name.
-func (kr *keyring) Add(name string, key []byte) (*Key, error) {
- r, err := unix.AddKey("user", name, key, int(kr.id))
- if err == nil {
- key := &Key{Name: name, id: keyID(r), ring: kr.id}
- return key, nil
- }
- return nil, err
-}
-
-// Search for a key by name, this also searches child keyrings linked to this
-// one. The key, if found, is linked to the top keyring that Search() was called
-// from.
-func (kr *keyring) Search(name string) (*Key, error) {
- id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
- if err == nil {
- return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
- }
- return nil, err
-}
-
-// ID returns the 32-bit kernel identifier of a keyring
-func (kr *keyring) ID() int32 {
- return int32(kr.id)
-}
-
-// SessionKeyring returns the current login session keyring
-func SessionKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
-}
-
-// UserKeyring returns the keyring specific to the current user.
-func UserKeyring() (Keyring, error) {
- return newKeyring(unix.KEY_SPEC_USER_KEYRING)
-}
-
-// Unlink an object from a keyring
-func Unlink(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// Link a key into a keyring
-func Link(parent Keyring, child ID) error {
- _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
- return err
-}
-
-// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it
-func ReadUserKeyring() ([]*Key, error) {
- var (
- b []byte
- err error
- sizeRead int
- )
- krSize := 4
- size := krSize
- b = make([]byte, size)
- sizeRead = size + 1
- for sizeRead > size {
- r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
- if err != nil {
- return nil, err
- }
-
- if sizeRead = int(r1); sizeRead > size {
- b = make([]byte, sizeRead)
- size = sizeRead
- sizeRead = size + 1
- } else {
- krSize = sizeRead
- }
- }
- keyIDs := getKeyIDsFromByte(b[:krSize])
- return keyIDs, err
-}
-
-func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
- idSize := 4
- var keys []*Key
- for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
- tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
- keys = append(keys, &Key{id: keyID(tempID)})
- }
- return keys
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
deleted file mode 100644
index 5f4d2157a..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// KeyPerm represents in-kernel access control permission to keys and keyrings
-// as a 32-bit integer broken up into four permission sets, one per byte.
-// In MSB order, the perms are: Processor, User, Group, Other.
-type KeyPerm uint32
-
-const (
- // PermOtherAll sets all permission for Other
- PermOtherAll KeyPerm = 0x3f << (8 * iota)
- // PermGroupAll sets all permission for Group
- PermGroupAll
- // PermUserAll sets all permission for User
- PermUserAll
- // PermProcessAll sets all permission for Processor
- PermProcessAll
-)
-
-// SetPerm sets the permissions on a key or keyring.
-func SetPerm(k ID, p KeyPerm) error {
- err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
- return err
-}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
deleted file mode 100644
index f61666e42..000000000
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
- "golang.org/x/sys/unix"
-)
-
-type keyID int32
-
-func newKeyring(id keyID) (*keyring, error) {
- r1, err := unix.KeyctlGetKeyringID(int(id), true)
- if err != nil {
- return nil, err
- }
-
- if id < 0 {
- r1 = int(id)
- }
- return &keyring{id: keyID(r1)}, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
new file mode 100644
index 000000000..65788651f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -0,0 +1,106 @@
+package private
+
+import (
+ "context"
+ "io"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+)
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+ types.ImageSource
+
+ // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+ SupportsGetBlobAt() bool
+ // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+ BlobChunkAccessor
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+
+ // SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+ SupportsPutBlobPartial() bool
+
+ // PutBlobWithOptions writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+
+ // PutBlobPartial attempts to create a blob using the data that is already present
+ // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+ // It is available only if SupportsPutBlobPartial().
+ // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+ // should fall back to PutBlobWithOptions.
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error)
+
+ // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	Cache    types.BlobInfoCache // Cache to optionally update with the uploaded blob.
+ IsConfig bool // True if the blob is a config
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+ Cache types.BlobInfoCache // Cache to use and/or update.
+ // If true, it is allowed to use an equivalent of the desired blob;
+ // in that case the returned info may not match the input.
+ CanSubstitute bool
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+ SrcRef reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+ Offset uint64
+ Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+ // GetBlobAt returns a sequential channel of readers that contain data for the requested
+ // blob chunks, and a channel that might get a single error value.
+	// The specified chunks must not overlap and must be sorted by their offset.
+ // The readers must be fully consumed, in the order they are returned, before blocking
+ // to read the next chunk.
+ GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+ Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+ return e.Status
+}
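The `BlobChunkAccessor` contract above — one reader per requested chunk delivered on a channel, drained fully and in order, with an error channel that is closed (possibly after a single error value) — can be exercised against an in-memory implementation. A minimal, self-contained sketch using local mirror types rather than the internal package:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

type chunk struct{ Offset, Length uint64 }

// fakeAccessor serves chunks of an in-memory blob, mimicking the
// BlobChunkAccessor contract: one reader per chunk, in request order,
// and an error channel closed when no error occurred.
type fakeAccessor struct{ blob string }

func (f fakeAccessor) GetBlobAt(ctx context.Context, chunks []chunk) (chan io.ReadCloser, chan error, error) {
	streams := make(chan io.ReadCloser)
	errs := make(chan error)
	go func() {
		defer close(streams)
		defer close(errs)
		for _, c := range chunks {
			streams <- io.NopCloser(strings.NewReader(f.blob[c.Offset : c.Offset+c.Length]))
		}
	}()
	return streams, errs, nil
}

func main() {
	acc := fakeAccessor{blob: "0123456789"}
	streams, errs, err := acc.GetBlobAt(context.Background(), []chunk{{0, 3}, {7, 3}})
	if err != nil {
		panic(err)
	}
	for rc := range streams { // drain each reader fully, in order
		b, _ := io.ReadAll(rc)
		rc.Close()
		fmt.Printf("chunk: %q\n", b)
	}
	if err := <-errs; err != nil { // receives nil once errs is closed
		panic(err)
	}
}
```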
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
deleted file mode 100644
index 388f8cf3b..000000000
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package types
-
-import (
- "context"
- "io"
-
- "github.com/containers/image/v5/docker/reference"
- publicTypes "github.com/containers/image/v5/types"
-)
-
-// ImageDestinationWithOptions is an internal extension to the ImageDestination
-// interface.
-type ImageDestinationWithOptions interface {
- publicTypes.ImageDestination
-
- // PutBlobWithOptions is a wrapper around PutBlob. If
- // options.LayerIndex is set, the blob will be committed directly.
- // Either by the calling goroutine or by another goroutine already
- // committing layers.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
-
- // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
- // options.LayerIndex is set, the reused blob will be recoreded as
- // already pulled.
- //
- // Please note that TryReusingBlobWithOptions and PutBlobWithOptions
- // *must* be used the together. Mixing the two with non "WithOptions"
- // functions is not supported.
- TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Denotes whether the blob is a config or not.
- IsConfig bool
- // Indicates an empty layer.
- EmptyLayer bool
- // The corresponding index in the layer slice.
- LayerIndex *int
-}
-
-// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
-type TryReusingBlobOptions struct {
- // Cache to look up blob infos.
- Cache publicTypes.BlobInfoCache
- // Use an equivalent of the desired blob.
- CanSubstitute bool
- // Indicates an empty layer.
- EmptyLayer bool
- // The corresponding index in the layer slice.
- LayerIndex *int
- // The reference of the image that contains the target blob.
- SrcRef reference.Named
-}
-
-// ImageSourceChunk is a portion of a blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceChunk struct {
- Offset uint64
- Length uint64
-}
-
-// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceSeekable interface {
- // GetBlobAt returns a stream for the specified blob.
- // The specified chunks must be not overlapping and sorted by their offset.
- GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
-}
-
-// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageDestinationPartial interface {
- // PutBlobPartial writes contents of stream and returns data representing the result.
- PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
-}
-
-// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
-type BadPartialRequestError struct {
- Status string
-}
-
-func (e BadPartialRequestError) Error() string {
- return e.Status
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
deleted file mode 100644
index 0bf161259..000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-
- "github.com/containers/image/v5/internal/pkg/keyctl"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-// NOTE: none of the functions here are currently used. If we ever want to
-// re-enable keyring support, we should introduce a similar built-in credential
-// helpers as for `sysregistriesv2.AuthenticationFileHelper`.
-
-const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return "", "", err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return "", "", err
- }
- authData, err := key.Get()
- if err != nil {
- return "", "", err
- }
- parts := strings.SplitN(string(authData), "\x00", 2)
- if len(parts) != 2 {
- return "", "", nil
- }
- return parts[0], parts[1], nil
-}
-
-func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
- userkeyring, err := keyctl.UserKeyring()
-
- if err != nil {
- return err
- }
- key, err := userkeyring.Search(genDescription(registry))
- if err != nil {
- return err
- }
- return key.Unlink()
-}
-
-func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
- keys, err := keyctl.ReadUserKeyring()
- if err != nil {
- return err
- }
-
- userkeyring, err := keyctl.UserKeyring()
- if err != nil {
- return err
- }
-
- for _, k := range keys {
- keyAttr, err := k.Describe()
- if err != nil {
- return err
- }
- // split string "type;uid;gid;perm;description"
- keyAttrs := strings.SplitN(keyAttr, ";", 5)
- if len(keyAttrs) < 5 {
- return errors.Errorf("Key attributes of %d are not available", k.ID())
- }
- keyDescribe := keyAttrs[4]
- if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
- err := keyctl.Unlink(userkeyring, k)
- if err != nil {
- return errors.Wrapf(err, "unlinking key %d", k.ID())
- }
- logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
- }
- }
- return nil
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
- keyring, err := keyctl.SessionKeyring()
- if err != nil {
- return err
- }
- id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
- if err != nil {
- return err
- }
-
- // sets all permission(view,read,write,search,link,set attribute) for current user
- // it enables the user to search the key after it linked to user keyring and unlinked from session keyring
- err = keyctl.SetPerm(id, keyctl.PermUserAll)
- if err != nil {
- return err
- }
- // link the key to userKeyring
- userKeyring, err := keyctl.UserKeyring()
- if err != nil {
- return errors.Wrapf(err, "getting user keyring")
- }
- err = keyctl.Link(userKeyring, id)
- if err != nil {
- return errors.Wrapf(err, "linking the key to user keyring")
- }
- // unlink the key from session keyring
- err = keyctl.Unlink(keyring, id)
- if err != nil {
- return errors.Wrapf(err, "unlinking the key from session keyring")
- }
- return nil
-}
-
-func genDescription(registry string) string { //nolint:deadcode,unused
- return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
deleted file mode 100644
index d9827d8ed..000000000
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !linux && (!386 || !amd64)
-// +build !linux
-// +build !386 !amd64
-
-package config
-
-func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused
- return "", "", ErrNotSupported
-}
-
-func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused
- return ErrNotSupported
-}
-
-func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused
- return ErrNotSupported
-}
-
-func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused
- return ErrNotSupported
-}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 7329ef6ee..bcb09c83c 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -18,9 +18,9 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
- internalTypes "github.com/containers/image/v5/internal/types"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@@ -446,15 +446,20 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
-// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is
-// set, the blob will be committed directly. Either by the calling goroutine
-// or by another goroutine already committing layers.
-//
-// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
-// used the together. Mixing the two with non "WithOptions" functions is not
-// supported.
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) {
- info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig)
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool {
+ return true
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+ info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
if err != nil {
return info, err
}
@@ -466,11 +471,6 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
}
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
- return true
-}
-
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
@@ -480,6 +480,15 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{
+ Cache: cache,
+ IsConfig: isConfig,
+ })
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
@@ -534,7 +543,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
s.lock.Unlock()
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
- cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+ options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
@@ -542,80 +551,40 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}
-// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
-// options.LayerIndex is set, the reused blob will be recoreded as already
-// pulled.
-//
-// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
-// used the together. Mixing the two with the non "WithOptions" functions
-// is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
- reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef)
- if err != nil || !reused || options.LayerIndex == nil {
- return reused, info, err
- }
-
- return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob.
-// If ref is provided, this function first tries to get layer from Additional Layer Store.
-func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) {
- // lock the entire method as it executes fairly quickly
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if ref != nil {
- // Check if we have the layer in the underlying additional layer store.
- aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
- if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
- } else if err == nil {
- // Record the uncompressed value so that we can use it to calculate layer IDs.
- s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
- s.blobAdditionalLayer[blobinfo.Digest] = aLayer
- return true, types.BlobInfo{
- Digest: blobinfo.Digest,
- Size: aLayer.CompressedSize(),
- MediaType: blobinfo.MediaType,
- }, nil
- }
- }
-
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
-
type zstdFetcher struct {
- stream internalTypes.ImageSourceSeekable
- ctx context.Context
- blobInfo types.BlobInfo
+ chunkAccessor private.BlobChunkAccessor
+ ctx context.Context
+ blobInfo types.BlobInfo
}
-// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt.
+// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- var newChunks []internalTypes.ImageSourceChunk
+ var newChunks []private.ImageSourceChunk
for _, v := range chunks {
- i := internalTypes.ImageSourceChunk{
+ i := private.ImageSourceChunk{
Offset: v.Offset,
Length: v.Length,
}
newChunks = append(newChunks, i)
}
- rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks)
- if _, ok := err.(internalTypes.BadPartialRequestError); ok {
+ rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks)
+ if _, ok := err.(private.BadPartialRequestError); ok {
err = chunked.ErrBadRequest{}
}
return rc, errs, err
}
-// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed
-// in a non-sequential way to retrieve the missing chunks.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
fetcher := zstdFetcher{
- stream: stream,
- ctx: ctx,
- blobInfo: srcInfo,
+ chunkAccessor: chunkAccessor,
+ ctx: ctx,
+ blobInfo: srcInfo,
}
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
@@ -640,6 +609,22 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int
return srcInfo, nil
}
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport cannot reuse the requested blob, TryReusingBlobWithOptions returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+ reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
+ if err != nil || !reused || options.LayerIndex == nil {
+ return reused, info, err
+ }
+
+ return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
@@ -650,16 +635,36 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{
+ Cache: cache,
+ CanSubstitute: canSubstitute,
+ })
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
- return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
-}
+ if options.SrcRef != nil {
+ // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest)
+ } else if err == nil {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
+ s.blobAdditionalLayer[blobinfo.Digest] = aLayer
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: aLayer.CompressedSize(),
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
-// tryReusingBlobLocked implements a core functionality of TryReusingBlob.
-// This must be called with a lock being held on storageImageDestination.
-func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
@@ -708,9 +713,9 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob
// Does the blob correspond to a known DiffID which we already have available?
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
- // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size.
- if canSubstitute || blobinfo.Size != -1 {
- if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
+ if options.CanSubstitute || blobinfo.Size != -1 {
+ if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest)
@@ -720,8 +725,8 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, blobinfo, nil
}
- if !canSubstitute {
- return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo)
+ if !options.CanSubstitute {
+ return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
@@ -1261,6 +1266,11 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
return true // Yes, we want the unmodified manifest
}
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (s *storageImageDestination) SupportsPutBlobPartial() bool {
+ return true
+}
+
// PutSignatures records the image's signatures for committing as a single data blob.
func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
sizes := []int{}
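The refactor above keeps PutBlob and TryReusingBlob as thin shims over the new options-based entry points, with queueOrCommit running only when a layer index is known. A minimal standalone sketch of that pattern, using hypothetical simplified types rather than the real private option structs:

package main

import "fmt"

// blobOptions is a hypothetical stand-in for private.PutBlobOptions.
type blobOptions struct {
	layerIndex *int // nil means the caller commits later
	emptyLayer bool
}

type dest struct{}

// putPending stores the blob as "pending"; committing happens separately.
func (d *dest) putPending(name string, _ *blobOptions) error {
	fmt.Println("pending:", name)
	return nil
}

// putWithOptions mirrors PutBlobWithOptions: store first, then queue or
// commit only when a layer index was supplied.
func (d *dest) putWithOptions(name string, o blobOptions) error {
	if err := d.putPending(name, &o); err != nil {
		return err
	}
	if o.layerIndex == nil {
		return nil
	}
	fmt.Println("queue/commit layer", *o.layerIndex, "empty:", o.emptyLayer)
	return nil
}

// put mirrors the legacy PutBlob: a thin shim over the options variant.
func (d *dest) put(name string) error {
	return d.putWithOptions(name, blobOptions{})
}

func main() {
	d := &dest{}
	_ = d.put("config") // legacy path: nothing is queued
	idx := 0
	_ = d.putWithOptions("layer0", blobOptions{layerIndex: &idx})
}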
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 0954850fe..a9163e059 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 19
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 1
+ VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
+ VersionDev = "-dev"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..83b061c70
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The libtrust Project Community Code of Conduct
+
+The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md
new file mode 100644
index 000000000..fab2c41e8
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the libtrust Project
+
+The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/godbus/dbus/v5/auth.go b/vendor/github.com/godbus/dbus/v5/auth.go
index a59b4c0eb..0f3b252c0 100644
--- a/vendor/github.com/godbus/dbus/v5/auth.go
+++ b/vendor/github.com/godbus/dbus/v5/auth.go
@@ -176,9 +176,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
+ } else {
+ conn.uuid = string(s[1])
+ return nil, true
}
- conn.uuid = string(s[1])
- return nil, true
case state == waitingForData:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
@@ -191,9 +192,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
+ } else {
+ conn.uuid = string(s[1])
+ return nil, true
}
- conn.uuid = string(s[1])
- return nil, true
case state == waitingForOk && string(s[0]) == "DATA":
err = authWriteLine(conn.transport, []byte("DATA"))
if err != nil {
diff --git a/vendor/github.com/godbus/dbus/v5/conn.go b/vendor/github.com/godbus/dbus/v5/conn.go
index 76fc5cde3..69978ea26 100644
--- a/vendor/github.com/godbus/dbus/v5/conn.go
+++ b/vendor/github.com/godbus/dbus/v5/conn.go
@@ -169,7 +169,7 @@ func Connect(address string, opts ...ConnOption) (*Conn, error) {
// SystemBusPrivate returns a new private connection to the system bus.
// Note: this connection is not ready to use. One must perform Auth and Hello
-// on the connection before it is useable.
+// on the connection before it is usable.
func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
return Dial(getSystemBusPlatformAddress(), opts...)
}
@@ -284,10 +284,6 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.ctx = context.Background()
}
conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx)
- go func() {
- <-conn.ctx.Done()
- conn.Close()
- }()
conn.calls = newCallTracker()
if conn.handler == nil {
@@ -302,6 +298,11 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.outHandler = &outputHandler{conn: conn}
conn.names = newNameTracker()
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+
+ go func() {
+ <-conn.ctx.Done()
+ conn.Close()
+ }()
return conn, nil
}
@@ -550,6 +551,11 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call {
call.ctx = ctx
call.ctxCanceler = canceler
conn.calls.track(msg.serial, call)
+ if ctx.Err() != nil {
+ // short path: don't even send the message if context already cancelled
+ conn.calls.handleSendError(msg, ctx.Err())
+ return call
+ }
go func() {
<-ctx.Done()
conn.calls.handleSendError(msg, ctx.Err())
@@ -649,7 +655,9 @@ func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...Match
// Signal registers the given channel to be passed all received signal messages.
//
-// Multiple of these channels can be registered at the same time.
+// Multiple of these channels can be registered at the same time. The channel is
+// closed if the Conn is closed; it should not be closed by the caller before
+// RemoveSignal has been called on it.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
@@ -765,7 +773,12 @@ func getKey(s, key string) string {
for _, keyEqualsValue := range strings.Split(s, ",") {
keyValue := strings.SplitN(keyEqualsValue, "=", 2)
if len(keyValue) == 2 && keyValue[0] == key {
- return keyValue[1]
+ val, err := UnescapeBusAddressValue(keyValue[1])
+ if err != nil {
+ // No way to return an error.
+ return ""
+ }
+ return val
}
}
return ""
diff --git a/vendor/github.com/godbus/dbus/v5/conn_other.go b/vendor/github.com/godbus/dbus/v5/conn_other.go
index 616dcf664..90289ca85 100644
--- a/vendor/github.com/godbus/dbus/v5/conn_other.go
+++ b/vendor/github.com/godbus/dbus/v5/conn_other.go
@@ -54,7 +54,7 @@ func tryDiscoverDbusSessionBusAddress() string {
if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
// if /run/user/<uid>/bus exists, that file itself
// *is* the unix socket, so return its path
- return fmt.Sprintf("unix:path=%s", runUserBusFile)
+ return fmt.Sprintf("unix:path=%s", EscapeBusAddressValue(runUserBusFile))
}
if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
// if /run/user/<uid>/dbus-session exists, it's a
@@ -85,9 +85,6 @@ func getRuntimeDirectory() (string, error) {
}
func fileExists(filename string) bool {
- if _, err := os.Stat(filename); !os.IsNotExist(err) {
- return true
- } else {
- return false
- }
+ _, err := os.Stat(filename)
+ return !os.IsNotExist(err)
}
diff --git a/vendor/github.com/godbus/dbus/v5/dbus.go b/vendor/github.com/godbus/dbus/v5/dbus.go
index ddf3b7afd..c188d1048 100644
--- a/vendor/github.com/godbus/dbus/v5/dbus.go
+++ b/vendor/github.com/godbus/dbus/v5/dbus.go
@@ -122,8 +122,11 @@ func isConvertibleTo(dest, src reflect.Type) bool {
case dest.Kind() == reflect.Slice:
return src.Kind() == reflect.Slice &&
isConvertibleTo(dest.Elem(), src.Elem())
+ case dest.Kind() == reflect.Ptr:
+ dest = dest.Elem()
+ return isConvertibleTo(dest, src)
case dest.Kind() == reflect.Struct:
- return src == interfacesType
+ return src == interfacesType || dest.Kind() == src.Kind()
default:
return src.ConvertibleTo(dest)
}
@@ -274,13 +277,8 @@ func storeSliceIntoInterface(dest, src reflect.Value) error {
func storeSliceIntoSlice(dest, src reflect.Value) error {
if dest.IsNil() || dest.Len() < src.Len() {
dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
- }
- if dest.Len() != src.Len() {
- return fmt.Errorf(
- "dbus.Store: type mismatch: "+
- "slices are different lengths "+
- "need: %d have: %d",
- src.Len(), dest.Len())
+ } else if dest.Len() > src.Len() {
+ dest.Set(dest.Slice(0, src.Len()))
}
for i := 0; i < src.Len(); i++ {
err := store(dest.Index(i), getVariantValue(src.Index(i)))
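With the storeSliceIntoSlice change above, a pre-allocated destination longer than the source is truncated to the source length instead of producing a length-mismatch error. A small sketch:

package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	src := []int32{1, 2, 3}
	dest := make([]int32, 5) // pre-allocated longer than src
	if err := dbus.Store([]interface{}{src}, &dest); err != nil {
		panic(err)
	}
	fmt.Println(dest, len(dest)) // [1 2 3] 3
}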
diff --git a/vendor/github.com/godbus/dbus/v5/doc.go b/vendor/github.com/godbus/dbus/v5/doc.go
index ade1df951..8f25a00d6 100644
--- a/vendor/github.com/godbus/dbus/v5/doc.go
+++ b/vendor/github.com/godbus/dbus/v5/doc.go
@@ -10,8 +10,10 @@ value.
Conversion Rules
For outgoing messages, Go types are automatically converted to the
-corresponding D-Bus types. The following types are directly encoded as their
-respective D-Bus equivalents:
+corresponding D-Bus types. See the official specification at
+https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more
+information on the D-Bus type system. The following types are directly encoded
+as their respective D-Bus equivalents:
Go type | D-Bus type
------------+-----------
@@ -39,8 +41,8 @@ Maps encode as DICTs, provided that their key type can be used as a key for
a DICT.
Structs other than Variant and Signature encode as a STRUCT containing their
-exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
-be skipped.
+exported fields in order. Fields whose tags contain `dbus:"-"` and unexported
+fields will be skipped.
Pointers encode as the value they're pointed to.
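A short sketch of the struct rule above: exported fields contribute to the signature in order, while `dbus:"-"`-tagged and unexported fields are skipped.

package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

type pair struct {
	A int32
	B string `dbus:"-"` // tagged: skipped
	c bool   // unexported: skipped
}

func main() {
	fmt.Println(dbus.SignatureOf(pair{A: 1})) // (i)
}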
diff --git a/vendor/github.com/godbus/dbus/v5/escape.go b/vendor/github.com/godbus/dbus/v5/escape.go
new file mode 100644
index 000000000..d1509d945
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/escape.go
@@ -0,0 +1,84 @@
+package dbus
+
+import "net/url"
+
+// EscapeBusAddressValue escapes a value for use in a D-Bus server address,
+// as required by the D-Bus specification at
+// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
+func EscapeBusAddressValue(val string) string {
+ toEsc := strNeedsEscape(val)
+ if toEsc == 0 {
+ // Avoid unneeded allocation/copying.
+ return val
+ }
+
+ // Avoid allocation for short paths.
+ var buf [64]byte
+ var out []byte
+ // Every to-be-escaped byte needs 2 extra bytes.
+ required := len(val) + 2*toEsc
+ if required <= len(buf) {
+ out = buf[:required]
+ } else {
+ out = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(val); i++ {
+ if ch := val[i]; needsEscape(ch) {
+ // Convert ch to %xx, where xx is hex value.
+ out[j] = '%'
+ out[j+1] = hexchar(ch >> 4)
+ out[j+2] = hexchar(ch & 0x0F)
+ j += 3
+ } else {
+ out[j] = ch
+ j++
+ }
+ }
+
+ return string(out)
+}
+
+// UnescapeBusAddressValue unescapes values in D-Bus server addresses,
+// as defined by the D-Bus specification at
+// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
+func UnescapeBusAddressValue(val string) (string, error) {
+ // Looks like url.PathUnescape does exactly what is required.
+ return url.PathUnescape(val)
+}
+
+// hexchar returns the hexadecimal digit for n, where n < 16.
+// For invalid values of n, the function panics.
+func hexchar(n byte) byte {
+ const hex = "0123456789abcdef"
+
+ // For n >= len(hex), runtime will panic.
+ return hex[n]
+}
+
+// needsEscape reports whether a byte is NOT one of the optionally-escaped bytes.
+func needsEscape(c byte) bool {
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '_', '/', '\\', '.', '*':
+ return false
+ }
+
+ return true
+}
+
+// strNeedsEscape returns the number of bytes in val that need escaping.
+func strNeedsEscape(val string) int {
+ count := 0
+
+ for i := 0; i < len(val); i++ {
+ if needsEscape(val[i]) {
+ count++
+ }
+ }
+
+ return count
+}
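A usage sketch of the two helpers above, round-tripping a value that contains a byte outside the optionally-escaped set:

package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	path := "/run/user/1000/bus dir/socket" // the space must be escaped
	esc := dbus.EscapeBusAddressValue(path)
	fmt.Println("unix:path=" + esc) // unix:path=/run/user/1000/bus%20dir/socket

	orig, err := dbus.UnescapeBusAddressValue(esc)
	fmt.Println(orig == path, err) // true <nil>
}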
diff --git a/vendor/github.com/godbus/dbus/v5/export.go b/vendor/github.com/godbus/dbus/v5/export.go
index 522334715..d3dd9f7cd 100644
--- a/vendor/github.com/godbus/dbus/v5/export.go
+++ b/vendor/github.com/godbus/dbus/v5/export.go
@@ -3,6 +3,7 @@ package dbus
import (
"errors"
"fmt"
+ "os"
"reflect"
"strings"
)
@@ -209,28 +210,23 @@ func (conn *Conn) handleCall(msg *Message) {
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
- conn.sendMessageAndIfClosed(reply, nil)
+ if err := reply.IsValid(); err != nil {
+ fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err)
+ } else {
+ conn.sendMessageAndIfClosed(reply, nil)
+ }
}
}
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
- if !path.IsValid() {
- return errors.New("dbus: invalid object path")
- }
i := strings.LastIndex(name, ".")
if i == -1 {
return errors.New("dbus: invalid method name")
}
iface := name[:i]
member := name[i+1:]
- if !isValidMember(member) {
- return errors.New("dbus: invalid method name")
- }
- if !isValidInterface(iface) {
- return errors.New("dbus: invalid interface name")
- }
msg := new(Message)
msg.Type = TypeSignal
msg.Headers = make(map[HeaderField]Variant)
@@ -241,6 +237,9 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
var closed bool
conn.sendMessageAndIfClosed(msg, func() {
diff --git a/vendor/github.com/godbus/dbus/v5/homedir.go b/vendor/github.com/godbus/dbus/v5/homedir.go
index 0b745f931..c44d9b5fc 100644
--- a/vendor/github.com/godbus/dbus/v5/homedir.go
+++ b/vendor/github.com/godbus/dbus/v5/homedir.go
@@ -2,27 +2,24 @@ package dbus
import (
"os"
- "sync"
-)
-
-var (
- homeDir string
- homeDirLock sync.Mutex
+ "os/user"
)
+// getHomeDir returns the home directory of the current user, which is usually
+// the value of the HOME environment variable. If that is not set or empty, the
+// os/user package is used.
+//
+// If linking statically with cgo enabled against glibc, make sure the
+// osusergo build tag is used.
+//
+// If you need to do nss lookups, do not disable cgo or set osusergo.
func getHomeDir() string {
- homeDirLock.Lock()
- defer homeDirLock.Unlock()
-
+ homeDir := os.Getenv("HOME")
if homeDir != "" {
return homeDir
}
-
- homeDir = os.Getenv("HOME")
- if homeDir != "" {
- return homeDir
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
-
- homeDir = lookupHomeDir()
- return homeDir
+ return "/"
}
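As the comment above notes, static glibc/cgo builds should force the pure-Go os/user implementation so user.Current does not depend on dynamic NSS lookups; the usual invocation is:

    go build -tags osusergo .

This replaces the old static_build-specific /etc/passwd parser that the next two deletions remove.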
diff --git a/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go b/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go
deleted file mode 100644
index 2732081e7..000000000
--- a/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !static_build
-
-package dbus
-
-import (
- "os/user"
-)
-
-func lookupHomeDir() string {
- u, err := user.Current()
- if err != nil {
- return "/"
- }
- return u.HomeDir
-}
diff --git a/vendor/github.com/godbus/dbus/v5/homedir_static.go b/vendor/github.com/godbus/dbus/v5/homedir_static.go
deleted file mode 100644
index b9d9cb552..000000000
--- a/vendor/github.com/godbus/dbus/v5/homedir_static.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build static_build
-
-package dbus
-
-import (
- "bufio"
- "os"
- "strconv"
- "strings"
-)
-
-func lookupHomeDir() string {
- myUid := os.Getuid()
-
- f, err := os.Open("/etc/passwd")
- if err != nil {
- return "/"
- }
- defer f.Close()
-
- s := bufio.NewScanner(f)
-
- for s.Scan() {
- if err := s.Err(); err != nil {
- break
- }
-
- line := strings.TrimSpace(s.Text())
- if line == "" {
- continue
- }
-
- parts := strings.Split(line, ":")
-
- if len(parts) >= 6 {
- uid, err := strconv.Atoi(parts[2])
- if err == nil && uid == myUid {
- return parts[5]
- }
- }
- }
-
- // Default to / if we can't get a better value
- return "/"
-}
diff --git a/vendor/github.com/godbus/dbus/v5/message.go b/vendor/github.com/godbus/dbus/v5/message.go
index 16693eb30..bdf43fdd6 100644
--- a/vendor/github.com/godbus/dbus/v5/message.go
+++ b/vendor/github.com/godbus/dbus/v5/message.go
@@ -208,7 +208,7 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) {
// The possibly returned error can be an error of the underlying reader, an
// InvalidMessageError or a FormatError.
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
- return DecodeMessageWithFDs(rd, make([]int, 0));
+ return DecodeMessageWithFDs(rd, make([]int, 0))
}
type nullwriter struct{}
@@ -227,8 +227,8 @@ func (msg *Message) CountFds() (int, error) {
}
func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) {
- if err := msg.IsValid(); err != nil {
- return make([]int, 0), err
+ if err := msg.validateHeader(); err != nil {
+ return nil, err
}
var vs [7]interface{}
switch order {
@@ -237,7 +237,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds
case binary.BigEndian:
vs[0] = byte('B')
default:
- return make([]int, 0), errors.New("dbus: invalid byte order")
+ return nil, errors.New("dbus: invalid byte order")
}
body := new(bytes.Buffer)
fds = make([]int, 0)
@@ -284,8 +284,13 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error)
}
// IsValid checks whether msg is a valid message and returns an
-// InvalidMessageError if it is not.
+// InvalidMessageError or FormatError if it is not.
func (msg *Message) IsValid() error {
+ var b bytes.Buffer
+ return msg.EncodeTo(&b, nativeEndian)
+}
+
+func (msg *Message) validateHeader() error {
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
return InvalidMessageError("invalid flags")
}
@@ -330,6 +335,7 @@ func (msg *Message) IsValid() error {
return InvalidMessageError("missing signature")
}
}
+
return nil
}
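Since IsValid now performs a trial encode, it catches body and signature problems as well as bad headers. A sketch of validating a hand-built signal, mirroring the message shape Emit constructs:

package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	msg := new(dbus.Message)
	msg.Type = dbus.TypeSignal
	msg.Headers = map[dbus.HeaderField]dbus.Variant{
		dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/org/example")),
		dbus.FieldInterface: dbus.MakeVariant("org.example.Iface"),
		dbus.FieldMember:    dbus.MakeVariant("Changed"),
	}
	fmt.Println(msg.IsValid()) // <nil> for a well-formed signal
}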
diff --git a/vendor/github.com/godbus/dbus/v5/server_interfaces.go b/vendor/github.com/godbus/dbus/v5/server_interfaces.go
index 79d97edf3..e4e0389fd 100644
--- a/vendor/github.com/godbus/dbus/v5/server_interfaces.go
+++ b/vendor/github.com/godbus/dbus/v5/server_interfaces.go
@@ -63,7 +63,7 @@ type Method interface {
// any other decoding scheme.
type ArgumentDecoder interface {
// To decode the arguments of a method the sender and message are
- // provided incase the semantics of the implementer provides access
+ // provided in case the semantics of the implementer provides access
// to these as part of the method invocation.
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
}
diff --git a/vendor/github.com/godbus/dbus/v5/sig.go b/vendor/github.com/godbus/dbus/v5/sig.go
index 41a039812..6b9cadb5f 100644
--- a/vendor/github.com/godbus/dbus/v5/sig.go
+++ b/vendor/github.com/godbus/dbus/v5/sig.go
@@ -102,7 +102,7 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) {
}
}
if len(s) == 0 {
- panic("empty struct")
+ panic(InvalidTypeError{t})
}
return "(" + s + ")"
case reflect.Array, reflect.Slice:
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unix.go b/vendor/github.com/godbus/dbus/v5/transport_unix.go
index 2212e7fa7..0a8c712eb 100644
--- a/vendor/github.com/godbus/dbus/v5/transport_unix.go
+++ b/vendor/github.com/godbus/dbus/v5/transport_unix.go
@@ -154,17 +154,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// substitute the values in the message body (which are indices for the
// array receiver via OOB) with the actual values
for i, v := range msg.Body {
- switch v.(type) {
+ switch index := v.(type) {
case UnixFDIndex:
- j := v.(UnixFDIndex)
- if uint32(j) >= unixfds {
+ if uint32(index) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
- msg.Body[i] = UnixFD(fds[j])
+ msg.Body[i] = UnixFD(fds[index])
case []UnixFDIndex:
- idxArray := v.([]UnixFDIndex)
- fdArray := make([]UnixFD, len(idxArray))
- for k, j := range idxArray {
+ fdArray := make([]UnixFD, len(index))
+ for k, j := range index {
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_zos.go b/vendor/github.com/godbus/dbus/v5/transport_zos.go
new file mode 100644
index 000000000..1bba0d6bf
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_zos.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/vendor/github.com/godbus/dbus/v5/variant.go b/vendor/github.com/godbus/dbus/v5/variant.go
index f1e81f3ed..ca3dbe16a 100644
--- a/vendor/github.com/godbus/dbus/v5/variant.go
+++ b/vendor/github.com/godbus/dbus/v5/variant.go
@@ -49,7 +49,7 @@ func ParseVariant(s string, sig Signature) (Variant, error) {
}
// format returns a formatted version of v and whether this string can be parsed
-// unambigously.
+// unambiguously.
func (v Variant) format() (string, bool) {
switch v.sig.str[0] {
case 'b', 'i':
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index e8ff994f8..f7f74c153 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,19 @@ This package provides various compression algorithms.
# changelog
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
* Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
@@ -53,6 +66,9 @@ This package provides various compression algorithms.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+<details>
+ <summary>See changes to v1.12.x</summary>
+
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +90,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>
<details>
- <summary>See changes prior to v1.12.1</summary>
+ <summary>See changes to v1.11.x</summary>
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +152,7 @@ This package provides various compression algorithms.
</details>
<details>
- <summary>See changes prior to v1.11.0</summary>
+ <summary>See changes to v1.10.x</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +314,6 @@ This package provides various compression algorithms.
# deflate usage
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
@@ -323,6 +335,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 0b2e54972..d55ea2a77 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
// matchlenLong will return the match length between offsets and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
- if debugDecode {
+ if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index fd49efd75..25f6d1108 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -8,6 +8,7 @@ import (
"encoding/binary"
"fmt"
"io"
+ "math"
)
const (
@@ -24,6 +25,10 @@ const (
codegenCodeCount = 19
badCode = 255
+ // maxPredefinedTokens is the maximum number of tokens
+	// for which we check whether the fixed encoding is smaller.
+ maxPredefinedTokens = 250
+
// bufferFlushSize indicates the buffer size
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
@@ -36,8 +41,11 @@ const (
bufferSize = bufferFlushSize + 8
)
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
+var lengthExtraBits = [32]uint8{
/* 257 */ 0, 0, 0,
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -51,6 +59,9 @@ var lengthBase = [32]uint8{
64, 80, 96, 112, 128, 160, 192, 224, 255,
}
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
// offset code word extra bits.
var offsetExtraBits = [32]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
@@ -78,10 +89,10 @@ func init() {
for i := range offsetCombined[:] {
// Don't use extended window values...
- if offsetBase[i] > 0x006000 {
+ if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
continue
}
- offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+ offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
}
}
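The new layout packs the extra-bit count into the low byte and the offset base into the upper bits, so writeTokens can recover both from a single table load. A tiny sketch of the pack/unpack (the values are hypothetical, not a real table entry):

package main

import "fmt"

func main() {
	const offsetBase, extraBits = 0x0060, 6 // hypothetical entry
	packed := uint32(extraBits) | offsetBase<<8

	fmt.Println(uint8(packed)) // 6: number of extra bits to emit
	fmt.Println(packed >> 8)   // 96: base subtracted from the offset
}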
@@ -97,7 +108,7 @@ type huffmanBitWriter struct {
// Data waiting to be written is bytes[0:nbytes]
// and then the low nbits of bits.
bits uint64
- nbits uint16
+ nbits uint8
nbytes uint8
lastHuffMan bool
literalEncoding *huffmanEncoder
@@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) {
_, w.err = w.writer.Write(b)
}
-func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
@@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
// Fixed Huffman baseline.
var literalEncoding = fixedLiteralEncoding
var offsetEncoding = fixedOffsetEncoding
- var size = w.fixedSize(extraBits)
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
// Dynamic Huffman?
var numCodegens int
@@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
size = reuseSize
}
- if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
- // Check if we get a reasonable size decrease.
- if storable && ssize <= size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
return
}
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
}
// Check if we get a reasonable size decrease.
if storable && ssize <= size {
@@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
// Store predefined, if we don't get a reasonable improvement.
- if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
- // Store bytes, if we don't get an improvement.
- if storable && ssize <= preSize {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
return
}
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
}
if storable && ssize <= size {
@@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
for _, t := range tokens {
- if t < matchType {
+ if t < 256 {
//w.writeCode(lits[t.literal()])
- c := lits[t.literal()]
+ c := lits[t]
bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
@@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
// Write the length
length := t.length()
- lengthCode := lengthCode(length)
+ lengthCode := lengthCode(length) & 31
if false {
- w.writeCode(lengths[lengthCode&31])
+ w.writeCode(lengths[lengthCode])
} else {
// inlined
- c := lengths[lengthCode&31]
+ c := lengths[lengthCode]
bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
@@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
- extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
- if extraLengthBits > 0 {
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
//w.writeBits(extraLength, extraLengthBits)
- extraLength := int32(length - lengthBase[lengthCode&31])
+ extraLength := int32(length - lengthBase[lengthCode])
bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
@@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
// Write the offset
offset := t.offset()
- offsetCode := offset >> 16
- offset &= matchOffsetOnlyMask
+ offsetCode := (offset >> 16) & 31
if false {
- w.writeCode(offs[offsetCode&31])
+ w.writeCode(offs[offsetCode])
} else {
// inlined
c := offs[offsetCode]
@@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
}
- offsetComb := offsetCombined[offsetCode]
- if offsetComb > 1<<16 {
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
//w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
- nbits += uint16(offsetComb >> 16)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1002,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
histogram(input, w.literalFreq[:numLiterals], fill)
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
w.literalFreq[endBlockMarker] = 1
w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
if fill {
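The fast path added above compares the summed squared deviation of the byte histogram against 2*len(input); near-uniform data stays under that bound and is stored without attempting compression. A standalone sketch of the same test:

package main

import "fmt"

func looksIncompressible(input []byte) bool {
	var freq [256]int
	for _, b := range input {
		freq[b]++
	}
	avg := float64(len(input)) / 256
	max := float64(len(input) * 2)
	abs := 0.0
	for _, v := range freq {
		d := float64(v) - avg
		abs += d * d
		if abs > max {
			return false // enough skew: worth trying to compress
		}
	}
	return true // near-uniform histogram: store instead
}

func main() {
	uniform := make([]byte, 4096)
	for i := range uniform {
		uniform[i] = byte(i)
	}
	fmt.Println(looksIncompressible(uniform))            // true
	fmt.Println(looksIncompressible(make([]byte, 4096))) // false: all zeros
}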
@@ -1019,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
estBits += estBits >> w.logNewTablePenalty
// Store bytes, if we don't get a reasonable improvement.
- ssize, storable := w.storedSize(input)
if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
@@ -1031,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
if estBits < reuseSize {
if debugDeflate {
- //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
}
// We owe an EOB
w.writeCode(w.literalEncoding.codes[endBlockMarker])
@@ -1065,6 +1108,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
// Unroll, write 3 codes/loop.
// Fastest number of unrolls.
for len(input) > 3 {
@@ -1074,13 +1120,16 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
bits >>= (n * 8) & 63
nbits -= n * 8
- nbytes += uint8(n)
+ nbytes += n
}
if nbytes >= bufferFlushSize {
if w.err != nil {
nbytes = 0
return
}
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
_, w.err = w.writer.Write(w.bytes[:nbytes])
nbytes = 0
}
@@ -1096,13 +1145,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Remaining...
for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
- if debugDeflate {
- count += int(c.len)
- }
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1114,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
nbytes = 0
return
}
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
_, w.err = w.writer.Write(w.bytes[:nbytes])
nbytes = 0
}
}
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= uint64(c.code) << (nbits & 63)
+ nbits += c.len
+ if debugDeflate {
+ count += int(c.len)
+ }
}
// Restore...
w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
if debugDeflate {
- fmt.Println("wrote", count/8, "bytes")
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+ }
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
}
+
if eof || sync {
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index f35e00261..9ab497c27 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -17,7 +17,8 @@ const (
// hcode is a huffman code with a bit code and bit length.
type hcode struct {
- code, len uint16
+ code uint16
+ len uint8
}
type huffmanEncoder struct {
@@ -56,7 +57,7 @@ type levelInfo struct {
}
// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
+func (h *hcode) set(code uint16, length uint8) {
h.len = length
h.code = code
}
@@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
var ch uint16
for ch = 0; ch < literalCount; ch++ {
var bits uint16
- var size uint16
+ var size uint8
switch {
case ch < 144:
// size 8, 000110000 .. 10111111
@@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
bits = ch + 192 - 280
size = 8
}
- codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+ codes[ch] = hcode{code: reverseBits(bits, size), len: size}
}
return h
}
@@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
+	// Load list[2], list[1], list[0] in descending order so the compiler needs only one bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
- lastFreq: int32(list[1].freq),
- nextCharFreq: int32(list[2].freq),
- nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
}
leafCounts[level][level] = 2
if level == 1 {
@@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
- level := maxBits
- for {
+ level := uint32(maxBits)
+ for level < 16 {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
@@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
levels[l.level-1].needed = 2
}
@@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
sortByLiteral(chunk)
for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)}
code++
}
list = list[0 : len(list)-int(bits)]
@@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list := h.freqcache[:len(freq)+1]
+ codes := h.codes[:len(freq)]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
@@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list[count] = literalNode{uint16(i), f}
count++
} else {
- list[count] = literalNode{}
- h.codes[i].len = 0
+ codes[i].len = 0
}
}
- list[len(freq)] = literalNode{}
+ list[count] = literalNode{}
list = list[:count]
if count <= 2 {
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index d5f62f6a2..414c0bea9 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -36,6 +36,13 @@ type lengthExtra struct {
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
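bitMask32 trades the variable shift in (1<<n)-1 for a table load. A quick equivalence sketch (abbreviated table):

package main

import "fmt"

var bitMask32 = [32]uint32{0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF} // first 9 entries only

func main() {
	b, n := uint32(0b1101_1010), uint(5)
	fmt.Println(b&bitMask32[n] == b&(1<<n-1)) // true: both keep the low 5 bits
}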
@@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error {
return nil
}
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBlockGeneric() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
- }
-
- var dist uint32
- if f.hd == nil {
- for f.nb < 5 {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- dist = uint32(sym)
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
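
huffmanBlockGeneric is removed here because the generated per-reader variants in inflate_gen.go (below) now cover every input, including a plain Reader. All of those variants share one discipline: the hot bit-buffer fields are cached in locals (fb, fnb) so they stay in registers, and every exit path writes them back before returning, because the decompressor can be suspended and resumed via f.step. A minimal sketch of that pattern, with illustrative names:

package main

import "fmt"

type dec struct {
	b    uint32 // bit buffer (LSB-first)
	nb   uint   // valid bits in b
	step func(*dec)
}

// block caches the hot fields in locals so they can live in
// registers, then flushes them back on exit so the next f.step
// invocation resumes with correct state.
func (f *dec) block() {
	fb, fnb := f.b, f.nb
	for fnb > 0 {
		fb >>= 1 // consume one bit (stand-in for real decoding)
		fnb--
	}
	f.b, f.nb = fb, fnb // flush before suspending
	f.step = (*dec).block
}

func main() {
	f := &dec{b: 0xAB, nb: 8, step: (*dec).block}
	f.step(f)
	fmt.Println(f.b, f.nb) // 0 0
}
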
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index cc6db2792..8d632cea0 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() {
)
fr := f.r.(*bytes.Buffer)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -39,41 +44,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -88,10 +87,12 @@ readLiteral:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -101,9 +102,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -111,25 +113,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -137,12 +141,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -152,38 +156,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -197,9 +198,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -207,14 +209,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -224,6 +228,7 @@ readLiteral:
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -248,10 +253,12 @@ copyHistory:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
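
In the hd == nil branch above, fixed-block distance codes are 5 bits stored MSB-first, while the decoder's bit buffer is LSB-first. Because & and << share the same precedence level in Go and associate left, fb & 0x1F << 3 parses as ((fb & 0x1F) << 3): the low 5 bits are moved to the top of a byte so bits.Reverse8 mirrors them into place. A worked standalone example:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	fb := uint32(0b10110) // next 5 stream bits, LSB-first
	dist := uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
	fmt.Println(dist) // 0b01101 = 13: the 5 bits reversed
}
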
// Decode a single Huffman block from f.
@@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() {
)
fr := f.r.(*bytes.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -283,41 +295,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -332,10 +338,12 @@ readLiteral:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBytesReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -345,9 +353,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -355,25 +364,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -381,12 +392,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -396,38 +407,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -441,9 +449,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -451,14 +460,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -468,6 +479,7 @@ readLiteral:
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -492,10 +504,12 @@ copyHistory:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBytesReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() {
)
fr := f.r.(*bufio.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -527,41 +546,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -576,10 +589,12 @@ readLiteral:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBufioReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -589,9 +604,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -599,25 +615,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -625,12 +643,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -640,38 +658,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -685,9 +700,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -695,14 +711,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -712,6 +730,7 @@ readLiteral:
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -736,10 +755,12 @@ copyHistory:
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBufioReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() {
)
fr := f.r.(*strings.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -771,41 +797,286 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with a single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(f.dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
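
The val := decCodeToLen[v-257] lookups in the functions above replace the old cascade of v < 265 / v < 269 / ... cases: each litlen code maps to a base length (stored minus 3) and an extra-bit count per RFC 1951, section 3.2.5. A worked standalone example for code 270:

package main

import "fmt"

type lengthExtra struct{ length, extra uint8 }

func main() {
	// decCodeToLen[270-257] per the table above: base 0x14, 2 extra bits.
	e := lengthExtra{length: 0x14, extra: 0x2}
	fb := uint32(0b10) // the two extra bits, LSB-first
	length := int(e.length) + 3
	length += int(fb & (1<<e.extra - 1)) // the real code uses bitMask32[n]
	fmt.Println(length)                  // 23 + 2 = 25
}
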
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb := f.nb, f.b
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with a single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -818,12 +1089,14 @@ readLiteral:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader
+ f.step = (*decompressor).huffmanGenericReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -833,9 +1106,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -843,25 +1117,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -869,12 +1145,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -884,38 +1160,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -929,9 +1202,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -939,14 +1213,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -956,6 +1232,7 @@ readLiteral:
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
@@ -978,12 +1255,14 @@ copyHistory:
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.step = (*decompressor).huffmanGenericReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
func (f *decompressor) huffmanBlockDecoder() func() {
@@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() {
return f.huffmanBufioReader
case *strings.Reader:
return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
default:
- return f.huffmanBlockGeneric
+ return f.huffmanGenericReader
}
}
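
huffmanBlockDecoder now routes both the Reader case and the default arm to huffmanGenericReader; the generic variant type-asserts f.r to the package's Reader interface, so any other input is assumed to be wrapped before reaching it. A sketch of the dispatch rationale (illustrative, not the vendored code): each concrete-type case lets the specialized body devirtualize its ReadByte calls, while one generic body handles everything else.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

func pick(r io.Reader) string {
	switch r.(type) {
	case *bytes.Buffer, *bytes.Reader:
		return "bytes fast path"
	case *bufio.Reader:
		return "bufio fast path"
	case *strings.Reader:
		return "strings fast path"
	default:
		return "generic Reader path"
	}
}

func main() {
	fmt.Println(pick(bytes.NewReader(nil))) // bytes fast path
	fmt.Println(pick(bufio.NewReader(nil))) // bufio fast path
}
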
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 1e5eea396..0022c8bb6 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,6 +1,10 @@
package flate
-import "fmt"
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
@@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
@@ -129,7 +158,28 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
}
// Save the match found
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ xl = 258 - baseMatchLength
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
s += l
nextEmit = s
if nextS >= s {
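
The two if false/else blocks above manually inline matchlenLong and AddMatchLong on the hot path. The match-length inlining compares eight bytes at a time: XOR the two words, and if the result is nonzero, the index of the first mismatching byte is TrailingZeros64(diff)/8, because the loads are little-endian. A standalone sketch:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen counts leading equal bytes of a and b, eight at a time.
func matchLen(a, b []byte) (l int) {
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// First differing byte: trailing zero bits / 8.
			return l + bits.TrailingZeros64(diff)>>3
		}
		l += 8
		a, b = a[8:], b[8:]
	}
	for i := range a { // byte-wise tail
		if i >= len(b) || a[i] != b[i] {
			break
		}
		l++
	}
	return l
}

func main() {
	fmt.Println(matchLen([]byte("gopher gopher!"), []byte("gopher gopheR!"))) // 12
}
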
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index c22b4244a..465b5e0ba 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -5,7 +5,7 @@ import "fmt"
// fastEncL3
type fastEncL3 struct {
fastGen
- table [tableSize]tableEntryPrev
+ table [1 << 16]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
@@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const (
inputMargin = 8 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ tableBits = 16
+ tableSize = 1 << tableBits
)
if debugDeflate && e.cur < 0 {
@@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
for {
- nextHash := hash(cv)
+ nextHash := hash4u(cv, tableBits)
s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit {
@@ -156,7 +158,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// Index first pair after match end.
if int(t+4) < len(src) && t > 0 {
cv := load3232(src, t)
- nextHash := hash(cv)
+ nextHash := hash4u(cv, tableBits)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t},
@@ -165,30 +167,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
goto emitRemainder
}
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 3},
+ // Store every 5th hash in-between.
+ for i := s - l + 2; i < s-5; i += 5 {
+ nextHash := hash4u(load3232(src, i), tableBits)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + i}}
}
- x >>= 8
- prevHash = hash(uint32(x))
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s.
+ x := load6432(src, s-2)
+ prevHash := hash4u(uint32(x), tableBits)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2},
}
x >>= 8
- prevHash = hash(uint32(x))
+ prevHash = hash4u(uint32(x), tableBits)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1},
}
x >>= 8
- currHash := hash(uint32(x))
+ currHash := hash4u(uint32(x), tableBits)
candidates := e.table[currHash]
cv = uint32(x)
e.table[currHash] = tableEntryPrev{
@@ -200,15 +203,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
candidate = candidates.Cur
minOffset := e.cur + s - (maxMatchOffset - 4)
- if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
+ if candidate.offset > minOffset {
+ if cv == load3232(src, candidate.offset-e.cur) {
+ // Found a match...
+ continue
+ }
candidate = candidates.Prev
if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
+ // Match at prev...
+ continue
}
}
cv = uint32(x >> 8)
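
Level 3 moves from the shared table size to a dedicated 1<<16-entry table, hashes with hash4u(cv, tableBits) instead of hash(cv), and indexes every 5th position inside long matches rather than only s-3 to s. The hash itself is presumably the package's multiplicative hash; a sketch under that assumption (Knuth multiplicative hashing: multiply by a large odd constant, keep the top tableBits bits — treat the exact constant as an assumption):

package main

import "fmt"

func hash4(u uint32, tableBits uint8) uint32 {
	const prime4bytes = 2654435761 // ~2^32 / golden ratio
	return (u * prime4bytes) >> (32 - tableBits)
}

func main() {
	fmt.Println(hash4(0x64636261, 16)) // a 16-bit bucket for "abcd"
}
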
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index 3a9618ee1..8005a5ca8 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,11 +13,10 @@ import (
)
const (
- // From top
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 5 bits offsetcode
- // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ // bits 0-15 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-20 offsetcode - 5 bits (bit 21 is unused)
+ // bits 22-29 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-31 type: 0 = literal, 1 = EOF, 2 = Match, 3 = Unused - 2 bits
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
@@ -276,7 +275,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
xoffset |= oCode << 16
t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode]++
+ t.offHist[oCode&31]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
t.n++
}
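
Per the rewritten layout comment, a token packs 16 bits of xoffset, a 5-bit offset code at bit 16, 8 bits of xlength at bit 22, and a 2-bit type at bit 30. The oc&31 masking added to the offHist updates looks like a bounds-check-elimination hint, assuming offHist has 32 entries. A worked packing/unpacking example with illustrative values:

package main

import "fmt"

const (
	lengthShift = 22
	matchType   = 2 << 30
)

func main() {
	// A match of length 7 at offset 6: xlength = 7 - 3 = 4,
	// xoffset = 6 - 1 = 5; offset code 4 is illustrative here.
	xlength, oc, xoffset := uint32(4), uint32(4), uint32(5)
	tok := matchType | xlength<<lengthShift | oc<<16 | xoffset
	fmt.Println(tok>>30&3, tok>>lengthShift&0xFF, tok>>16&31, tok&0xFFFF)
	// 2 4 4 5: type, xlength, offset code, xoffset
}
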
@@ -300,7 +299,7 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
xlength -= xl
xl -= baseMatchLength
t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc]++
+ t.offHist[oc&31]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
@@ -356,8 +355,8 @@ func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint8 { return uint8(t >> lengthShift) }
-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 2a06bd1a7..2668b64d3 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{
dt: s.dt,
actualTableLog: s.actualTableLog,
+ bufs: &s.decPool,
}
}
@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct {
dt dTable
actualTableLog uint8
+ bufs *sync.Pool
+}
+
+func (d *Decoder) buffer() *[4][256]byte {
+ buf, ok := d.bufs.Get().(*[4][256]byte)
+ if ok {
+ return buf
+ }
+ return &[4][256]byte{}
}
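
Decoder.buffer() hands out pooled *[4][256]byte scratch stripes, and every return path below pairs it with d.bufs.Put. A minimal sketch of the pattern, assuming decPool is a sync.Pool field on Scratch:

package main

import (
	"fmt"
	"sync"
)

var pool sync.Pool

// getBuf returns a pooled stripe if one is available, else a fresh one.
func getBuf() *[4][256]byte {
	if b, ok := pool.Get().(*[4][256]byte); ok {
		return b
	}
	return &[4][256]byte{}
}

func main() {
	b := getBuf()
	b[0][0] = 42
	pool.Put(b)
	fmt.Println(getBuf()[0][0]) // likely 42; the pool may drop items
}
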
// Decompress1X will decompress a 1X encoded stream.
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
}
default:
+ d.bufs.Put(bufs)
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
if len(dst) >= maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -735,12 +769,12 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -758,8 +792,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -767,8 +801,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
@@ -783,8 +817,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -792,25 +826,26 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
- decoded += 256
+ decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -818,12 +853,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
@@ -853,6 +889,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
// end inline...
if offset >= len(out) {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -871,6 +908,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
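
With bufoff raised from 64 to 256 and off a uint8, the stripe-full test becomes the overflow itself: off += 2 (or 4) wraps to zero exactly when 256 bytes have been written per stream, so if off == 0 replaces if off == bufoff and the explicit off = 0 reset disappears. A standalone demonstration:

package main

import "fmt"

func main() {
	var off uint8 // index into a 256-byte per-stream stripe
	for i := 0; i < 128; i++ {
		off += 2      // two decoded bytes per stream per iteration
		if off == 0 { // uint8 overflow: exactly 256 bytes written
			fmt.Println("flush after iteration", i+1) // 128
		}
	}
}
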
@@ -916,12 +954,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -942,8 +980,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +989,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +998,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +1007,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
@@ -987,8 +1025,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +1034,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +1043,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,25 +1052,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
- decoded += 256
+ decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -1040,12 +1079,13 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
@@ -1057,6 +1097,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
bitsLeft := int(br.off*8) + int(64-br.bitsRead)
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1077,6 +1118,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
// end inline...
if offset >= len(out) {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1091,9 +1133,11 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
@@ -1135,12 +1179,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -1150,104 +1194,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
- decoded += 256
+ decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -1257,10 +1306,10 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
@@ -1272,6 +1321,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
bitsLeft := int(br.off*8) + int(64-br.bitsRead)
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1292,6 +1342,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
// end inline...
if offset >= len(out) {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1306,9 +1357,11 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
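Note on the flush condition in the hunks above: off is a uint8 while bufoff grew from 64 (256/4) to 256, so off now wraps to zero exactly when all four per-stream buffers are full; that is why `if off == bufoff` became `if off == 0` and the explicit `off = 0` reset was dropped. A tiny self-contained check of the wraparound (plain Go, not part of the patch):

    package main

    import "fmt"

    func main() {
        var off uint8
        for i := 0; i < 64; i++ {
            off += 4 // four symbols decoded per stream per loop iteration
        }
        // 64*4 == 256 overflows a uint8 back to zero, so the `off == 0`
        // test fires exactly when the 256-byte buffers are full.
        fmt.Println(off == 0) // true
    }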
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb4..e8ad17ad0 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/bits"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -116,6 +117,7 @@ type Scratch struct {
nodes []nodeElt
tmpOut [4][]byte
fse *fse.Scratch
+ decPool sync.Pool // *[4][256]byte buffers.
huffWeight [maxSymbolValue + 1]byte
}
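The new decPool field, together with the d.bufs.Put(buf) calls threaded through the decompress hunks above, replaces the old stack-allocated [256]byte scratch with pooled *[4][256]byte buffers. A minimal sketch of that pattern, assuming illustrative names (decoder, buffer, and decode are stand-ins, not this package's exact API):

    package main

    import (
        "errors"
        "sync"
    )

    type decoder struct {
        bufs sync.Pool // holds *[4][256]byte, one array per bit-reader stream
    }

    // buffer fetches a scratch buffer from the pool, allocating on a miss.
    func (d *decoder) buffer() *[4][256]byte {
        if buf, ok := d.bufs.Get().(*[4][256]byte); ok {
            return buf
        }
        return &[4][256]byte{}
    }

    func (d *decoder) decode(src []byte) ([]byte, error) {
        buf := d.buffer()
        if len(src) == 0 {
            // The hot path avoids defer, so every early return must
            // recycle the buffer by hand, exactly as the diff does.
            d.bufs.Put(buf)
            return nil, errors.New("empty input")
        }
        // ... a real decoder would fill buf[0..3][off] here ...
        out := append([]byte(nil), buf[0][:]...)
        d.bufs.Put(buf)
        return out, nil
    }

The manual Put on every return path is the cost of skipping defer in a hot loop; the pool amortizes the 1 KiB allocation across decodes instead of zeroing it on each call's stack frame.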
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 5f08a2830..f51ab529a 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 7
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 8
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
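Both hunks lower kSearchStrength (from 7 and 8) to 6. In zstd-style fast encoders this constant controls how quickly the match-search step grows while no match is found; the reference heuristic is roughly `step = ((s - nextEmit) >> kSearchStrength) + 1`, so a smaller constant makes the encoder skip through incompressible input more aggressively. A hedged sketch of the effect (the exact formula in enc_fast.go may differ):

    package main

    import "fmt"

    // step models the canonical zstd skip heuristic; distSinceEmit is the
    // distance from the last emitted literal (s - nextEmit).
    func step(distSinceEmit, kSearchStrength int) int {
        return (distSinceEmit >> kSearchStrength) + 1
    }

    func main() {
        for _, d := range []int{64, 512, 4096} {
            fmt.Printf("dist=%d k=8:%d k=7:%d k=6:%d\n",
                d, step(d, 8), step(d, 7), step(d, 6))
        }
    }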
diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
index 30572ea87..6a6acfd9a 100644
--- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
+++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
@@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant {
}
func (v *GVariant) Ptr() unsafe.Pointer {
+ if v == nil {
+ return nil
+ }
return v.ptr
}
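The hunk above guards Ptr() against a nil receiver. In Go, a method with a pointer receiver may legally be invoked on a nil pointer; only the field access v.ptr would panic. A minimal sketch with a stand-in type (the real GVariant wraps a cgo pointer):

    package main

    import "unsafe"

    type GVariant struct{ ptr unsafe.Pointer }

    func (v *GVariant) Ptr() unsafe.Pointer {
        if v == nil {
            return nil // the call itself is legal; dereferencing v is not
        }
        return v.ptr
    }

    func main() {
        var v *GVariant // nil
        if v.Ptr() == nil {
            println("nil GVariant yields a nil pointer instead of panicking")
        }
    }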
diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md
index 30ea0e758..dea3e409e 100644
--- a/vendor/github.com/sylabs/sif/v2/LICENSE.md
+++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md
@@ -1,6 +1,6 @@
# LICENSE
-Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+Copyright (c) 2018-2022, Sylabs Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
index 00059242c..a4f5f1458 100644
--- a/vendor/github.com/xeipuuv/gojsonpointer/README.md
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -35,7 +35,7 @@ An implementation of JSON Pointer - Go language
## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+https://tools.ietf.org/html/rfc6901
### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
diff --git a/vendor/golang.org/x/term/codereview.cfg b/vendor/golang.org/x/term/codereview.cfg
new file mode 100644
index 000000000..3f8b14b64
--- /dev/null
+++ b/vendor/golang.org/x/term/codereview.cfg
@@ -0,0 +1 @@
+issuerepo: golang/go
diff --git a/vendor/golang.org/x/term/term.go b/vendor/golang.org/x/term/term.go
index 1f6a38fad..d59270880 100644
--- a/vendor/golang.org/x/term/term.go
+++ b/vendor/golang.org/x/term/term.go
@@ -12,6 +12,8 @@
// panic(err)
// }
// defer term.Restore(int(os.Stdin.Fd()), oldState)
+//
+// Note that on non-Unix systems os.Stdin.Fd() may not be 0.
package term
// State contains the state of a terminal.
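The added doc note warns that stdin is not always file descriptor 0. A small usage sketch that follows the advice by resolving the fd from os.Stdin and probing it before switching modes (all standard x/term API):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/term"
    )

    func main() {
        fd := int(os.Stdin.Fd()) // may not be 0 on non-Unix systems
        if !term.IsTerminal(fd) {
            fmt.Println("stdin is not a terminal")
            return
        }
        oldState, err := term.MakeRaw(fd)
        if err != nil {
            panic(err)
        }
        defer term.Restore(fd, oldState)
    }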
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d82524493..072b9661a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.47.5-0.20220222185251-194ee74231c3
+# github.com/containers/common v0.47.5-0.20220228211119-9880eb424fde
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
@@ -153,7 +153,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.19.1
+# github.com/containers/image/v5 v5.19.2-0.20220224100137-1045fb70b094
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -166,14 +166,15 @@ github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/image
github.com/containers/image/v5/internal/blobinfocache
+github.com/containers/image/v5/internal/imagedestination
+github.com/containers/image/v5/internal/imagesource
github.com/containers/image/v5/internal/iolimits
-github.com/containers/image/v5/internal/pkg/keyctl
github.com/containers/image/v5/internal/pkg/platform
+github.com/containers/image/v5/internal/private
github.com/containers/image/v5/internal/putblobdigest
github.com/containers/image/v5/internal/rootless
github.com/containers/image/v5/internal/streamdigest
github.com/containers/image/v5/internal/tmpdir
-github.com/containers/image/v5/internal/types
github.com/containers/image/v5/internal/uploadreader
github.com/containers/image/v5/manifest
github.com/containers/image/v5/oci/archive
@@ -202,7 +203,7 @@ github.com/containers/image/v5/transports
github.com/containers/image/v5/transports/alltransports
github.com/containers/image/v5/types
github.com/containers/image/v5/version
-# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
+# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
# github.com/containers/ocicrypt v1.1.2
## explicit
@@ -396,7 +397,7 @@ github.com/ghodss/yaml
github.com/go-logr/logr
# github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0
github.com/go-task/slim-sprig
-# github.com/godbus/dbus/v5 v5.0.6
+# github.com/godbus/dbus/v5 v5.1.0
## explicit
github.com/godbus/dbus/v5
# github.com/gogo/protobuf v1.3.2
@@ -461,7 +462,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.14.2
+# github.com/klauspost/compress v1.14.4
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -598,7 +599,7 @@ github.com/openshift/imagebuilder/dockerfile/command
github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/signal
github.com/openshift/imagebuilder/strslice
-# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
+# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.9.1
@@ -657,7 +658,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.3.1
+# github.com/sylabs/sif/v2 v2.3.2
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -699,7 +700,7 @@ github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
github.com/vishvananda/netns
-# github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b
+# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
github.com/xeipuuv/gojsonreference
@@ -754,7 +755,7 @@ golang.org/x/net/trace
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+# golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -763,7 +764,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/term
# golang.org/x/text v0.3.7
## explicit