Diffstat (limited to 'vendor')
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/.golangci.yml | 3
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/go.mod | 2
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/go.sum | 3
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go | 16
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go | 56
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go | 44
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go | 10
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go | 2
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go | 46
-rw-r--r-- vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go | 9
-rw-r--r-- vendor/github.com/containers/common/libnetwork/netavark/network.go | 11
-rw-r--r-- vendor/github.com/containers/common/libnetwork/netavark/run.go | 5
-rw-r--r-- vendor/github.com/containers/common/pkg/config/config.go | 13
-rw-r--r-- vendor/github.com/containers/common/pkg/parse/parse.go | 20
-rw-r--r-- vendor/github.com/containers/storage/VERSION | 2
-rw-r--r-- vendor/github.com/containers/storage/go.mod | 10
-rw-r--r-- vendor/github.com/containers/storage/go.sum | 25
-rw-r--r-- vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 630
-rw-r--r-- vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 310
-rw-r--r-- vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go | 81
-rw-r--r-- vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 32
-rw-r--r-- vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 961
-rw-r--r-- vendor/github.com/containers/storage/pkg/idtools/idtools.go | 36
-rw-r--r-- vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go | 6
-rw-r--r-- vendor/github.com/containers/storage/store.go | 44
-rw-r--r-- vendor/github.com/containers/storage/types/options.go | 39
-rw-r--r-- vendor/github.com/klauspost/compress/.goreleaser.yml | 4
-rw-r--r-- vendor/github.com/klauspost/compress/README.md | 14
-rw-r--r-- vendor/github.com/klauspost/compress/flate/deflate.go | 133
-rw-r--r-- vendor/github.com/klauspost/compress/flate/fast_encoder.go | 2
-rw-r--r-- vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 189
-rw-r--r-- vendor/github.com/klauspost/compress/flate/inflate.go | 9
-rw-r--r-- vendor/github.com/klauspost/compress/huff0/decompress.go | 234
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/bitreader.go | 15
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/bitwriter.go | 22
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/blockenc.go | 108
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/enc_base.go | 24
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/enc_fast.go | 139
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/fse_decoder.go | 2
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/fse_encoder.go | 5
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s | 189
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go (renamed from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go) | 7
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go | 4
-rw-r--r-- vendor/github.com/klauspost/compress/zstd/seqdec.go | 4
-rw-r--r-- vendor/modules.txt | 8
45 files changed, 2701 insertions, 827 deletions
diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
index 16b25be55..2400e7f1e 100644
--- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml
+++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
@@ -1,3 +1,6 @@
+run:
+ timeout: 8m
+
linters:
enable:
- stylecheck
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 7c9747667..9c60dd302 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -29,7 +29,7 @@ require (
go.opencensus.io v0.22.3
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210510120138-977fb7262007
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
google.golang.org/grpc v1.40.0
)
diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum
index 7c383806d..93c37657f 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.sum
+++ b/vendor/github.com/Microsoft/hcsshim/go.sum
@@ -812,8 +812,9 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
index 644f0ab71..e21354ffd 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
@@ -78,6 +78,13 @@ var (
// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+ // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+ ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+ // ErrInvalidHandle is an error that can be encountered when querying the properties of a compute
+ // system when the handle to that compute system has already been closed.
+ ErrInvalidHandle = syscall.Errno(0x6)
)
type ErrorEvent struct {
@@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
err == ErrElementNotFound
}
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error can surface when querying
+// stats on a container that is in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+ err = getInnerError(err)
+ return err == ErrInvalidHandle
+}
+
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
@@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
+ err == ErrProcessAlreadyStopped ||
err == ErrElementNotFound
}
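A sketch of how the new predicate composes with the existing ones for the stats case the comment describes. It assumes hcsshim's (*System).PropertiesV2 and the schema2 Properties type; the handling shown is illustrative, not the package's actual code.

    func safeStats(ctx context.Context, system *System) (*hcsschema.Properties, error) {
        props, err := system.PropertiesV2(ctx)
        if err != nil {
            // Teardown races: treat a vanished compute system as
            // "no stats" instead of surfacing a transient error.
            if IsErrorInvalidHandle(err) || IsAlreadyStopped(err) {
                return nil, nil
            }
            return nil, err
        }
        return props, nil
    }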
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 8f2034668..f4605922a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -3,7 +3,9 @@ package hcs
import (
"context"
"encoding/json"
+ "errors"
"io"
+ "os"
"sync"
"syscall"
"time"
@@ -16,16 +18,17 @@ import (
// Process represents a process in a compute system.
type Process struct {
- handleLock sync.RWMutex
- handle vmcompute.HcsProcess
- processID int
- system *System
- hasCachedStdio bool
- stdioLock sync.Mutex
- stdin io.WriteCloser
- stdout io.ReadCloser
- stderr io.ReadCloser
- callbackNumber uintptr
+ handleLock sync.RWMutex
+ handle vmcompute.HcsProcess
+ processID int
+ system *System
+ hasCachedStdio bool
+ stdioLock sync.Mutex
+ stdin io.WriteCloser
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ callbackNumber uintptr
+ killSignalDelivered bool
closedWaitOnce sync.Once
waitBlock chan struct{}
@@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
+ if process.killSignalDelivered {
+ // A kill signal has already been sent to this process. Sending a second
+ // one offers no real benefit, as a process cannot stop itself from
+ // being terminated once a TerminateProcess has been issued. Sending a
+ // second kill may also result in a number of errors (two of which are
+ // detailed below) that we can avoid handling.
+ return true, nil
+ }
+
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+ if err != nil {
+ // We still need to check these two cases, as processes may still be killed by an
+ // external actor (human operator, the OOM killer, a random script, etc.).
+ if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+ // There are two cases where it should be safe to ignore an error returned
+ // by HcsTerminateProcess. The first one is caused by the fact that
+ // HcsTerminateProcess ends up calling TerminateProcess in the context
+ // of a container. According to the TerminateProcess documentation:
+ // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+ // After a process has terminated, calls to TerminateProcess with open
+ // handles to the process fail with the ERROR_ACCESS_DENIED (5) error code.
+ // It's safe to ignore this error here. HCS should always have permissions
+ // to kill processes inside any container. So an ERROR_ACCESS_DENIED
+ // is unlikely to mean anything other than what the closing remarks in the
+ // documentation state.
+ //
+ // The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+ // is called twice in a very short amount of time. In such cases, hcs may return
+ // HCS_E_PROCESS_ALREADY_STOPPED.
+ return true, nil
+ }
+ }
events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err)
if err != nil {
err = makeProcessError(process, operation, err, events)
}
+
+ process.killSignalDelivered = delivered
return delivered, err
}
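With the killSignalDelivered flag, Kill becomes idempotent from the caller's side: the first call delivers the signal, and any later call short-circuits to (true, nil). A minimal retry-safe wrapper as a sketch (ensureKilled is hypothetical):

    func ensureKilled(ctx context.Context, proc *Process) error {
        // Safe to call repeatedly: a second Kill after delivery is a no-op.
        if _, err := proc.Kill(ctx); err != nil && !IsAlreadyStopped(err) {
            return err
        }
        return nil
    }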
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
new file mode 100644
index 000000000..def952541
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
@@ -0,0 +1,44 @@
+package winapi
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+ // We need this wrapper because the function takes a COORD struct by value rather than a pointer, so it must be reinterpreted as a uint32 before the call.
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, dwFlags, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+ // We need this wrapper because the function takes a COORD struct by value rather than a pointer, so it must be reinterpreted as a uint32 before the call.
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+// _In_ COORD size,
+// _In_ HANDLE hInput,
+// _In_ HANDLE hOutput,
+// _In_ DWORD dwFlags,
+// _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+// _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+// _In_ HPCON hPC ,
+// _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
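The *uint32 casts in the wrappers above exist because the Win32 functions take COORD by value, packed into a single 32-bit argument. A cast-free equivalent of that packing, relying on windows.Coord being struct{ X, Y int16 } in little-endian layout (coordToUint32 is illustrative, not part of the package):

    func coordToUint32(c windows.Coord) uint32 {
        // X occupies the low 16 bits of the packed value, Y the high 16 bits.
        return uint32(uint16(c.X)) | uint32(uint16(c.Y))<<16
    }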
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index b87068327..37839435b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -2,9 +2,7 @@ package winapi
const PROCESS_ALL_ACCESS uint32 = 2097151
-// DWORD GetProcessImageFileNameW(
-// HANDLE hProcess,
-// LPWSTR lpImageFileName,
-// DWORD nSize
-// );
-//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
+const (
+ PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
+ PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
+)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
index ec88c0d21..1d4ba3c4f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
@@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index 59ddee274..4eb64b4c0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -37,12 +37,15 @@ func errnoErr(e syscall.Errno) error {
}
var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
- modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
+ procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
+ procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
+ procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW")
@@ -58,7 +61,6 @@ var (
procLogonUserW = modadvapi32.NewProc("LogonUserW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
- procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -71,6 +73,33 @@ var (
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
)
+func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
+ r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func ClosePseudoConsole(hpc windows.Handle) {
+ syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
+ return
+}
+
+func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
+ r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0)
@@ -227,19 +256,6 @@ func LocalFree(ptr uintptr) {
return
}
-func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
- size = uint32(r0)
- if size == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
amount = uint32(r0)
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
index 49fb740cd..75dce5d82 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -38,4 +38,13 @@ const (
// V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
V21H1 = 19043
+
+ // V21H2Win10 corresponds to Windows 10 (November 2021 Update).
+ V21H2Win10 = 19044
+
+ // V21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ V21H2Server = 20348
+
+ // V21H2Win11 corresponds to Windows 11 (original release).
+ V21H2Win11 = 22000
)
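A short sketch of how these constants are typically consumed, assuming the package's Build() helper, which reports the build number of the running OS:

    func isServer2022OrLater() bool {
        return osversion.Build() >= osversion.V21H2Server
    }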
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index ce252bc1d..9ed09f3f4 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -21,6 +22,12 @@ type netavarkNetwork struct {
// networkConfigDir is directory where the network config files are stored.
networkConfigDir string
+ // networkRunDir is where temporary files are stored, e.g. the ipam db, the aardvark config, etc.
+ networkRunDir string
+
+ // tells netavark whether this is rootless or rootful mode, "true" or "false"
+ networkRootless bool
+
// netavarkBinary is the path to the netavark binary.
netavarkBinary string
@@ -53,7 +60,7 @@ type InitConfig struct {
// NetavarkBinary is the path to the netavark binary.
NetavarkBinary string
- // NetworkRunDir is where temporary files are stored, i.e.the ipam db.
+ // NetworkRunDir is where temporary files are stored, e.g. the ipam db, the aardvark config
NetworkRunDir string
// DefaultNetwork is the name for the default network.
@@ -99,7 +106,9 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
n := &netavarkNetwork{
networkConfigDir: conf.NetworkConfigDir,
+ networkRunDir: conf.NetworkRunDir,
netavarkBinary: conf.NetavarkBinary,
+ networkRootless: unshare.IsRootless(),
ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"),
defaultNetwork: defaultNetworkName,
defaultSubnet: defaultNet,
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
index c6f2007e2..b8be6e632 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/run.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -5,6 +5,7 @@ package netavark
import (
"encoding/json"
"fmt"
+ "strconv"
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
@@ -54,7 +55,7 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions
}
result := map[string]types.StatusBlock{}
- err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result)
+ err = n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "setup", namespacePath}, netavarkOpts, &result)
if err != nil {
// lets dealloc ips to prevent leaking
if err := n.deallocIPs(&options.NetworkOptions); err != nil {
@@ -94,7 +95,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO
return errors.Wrap(err, "failed to convert net opts")
}
- retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)
+ retErr := n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "teardown", namespacePath}, netavarkOpts, nil)
// when netavark returned an error we still free the used ips
// otherwise we could end up in a state where we block the ips forever
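With these two changes every netavark invocation carries the global flags ahead of the subcommand; the resulting argument vector looks roughly like this (paths are illustrative):

    args := []string{
        "--config", "/run/containers/networks", // n.networkRunDir
        "--rootless=false",                     // strconv.FormatBool(n.networkRootless)
        "setup",                                // or "teardown"
        "/run/netns/netns-abc123",              // namespacePath
    }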
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 6837a378a..0db1004df 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -72,6 +72,8 @@ type Config struct {
Network NetworkConfig `toml:"network"`
// Secret section defines configurations for the secret management
Secrets SecretConfig `toml:"secrets"`
+ // ConfigMaps section defines configurations for configmap management
+ ConfigMaps ConfigMapConfig `toml:"configmaps"`
}
// ContainersConfig represents the "containers" TOML config table
@@ -514,6 +516,17 @@ type SecretConfig struct {
Opts map[string]string `toml:"opts,omitempty"`
}
+// ConfigMapConfig represents the "configmaps" TOML config table
+type ConfigMapConfig struct {
+ // Driver specifies the configmap driver to use.
+ // Currently valid values:
+ // * file
+ // * pass
+ Driver string `toml:"driver,omitempty"`
+ // Opts contains driver specific options
+ Opts map[string]string `toml:"opts,omitempty"`
+}
+
// MachineConfig represents the "machine" TOML config table
type MachineConfig struct {
// Number of CPUs a machine is created with.
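A minimal sketch of how the new table decodes, using the BurntSushi/toml dependency this module already vendors; the driver name and the root opt are illustrative values:

    var cfg Config
    _, err := toml.Decode(`
    [configmaps]
    driver = "file"

    [configmaps.opts]
    root = "/etc/containers/configmaps"
    `, &cfg)
    // on success: cfg.ConfigMaps.Driver == "file"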
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
index fda129c83..5d826e805 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -14,9 +14,27 @@ import (
// ValidateVolumeOpts validates a volume's options
func ValidateVolumeOpts(options []string) ([]string, error) {
- var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int
+ var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
finalOpts := make([]string, 0, len(options))
for _, opt := range options {
+ // support advanced options like upperdir=/path, workdir=/path
+ if strings.Contains(opt, "upperdir") {
+ foundUpperDir++
+ if foundUpperDir > 1 {
+ return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+ }
+ finalOpts = append(finalOpts, opt)
+ continue
+ }
+ if strings.Contains(opt, "workdir") {
+ foundWorkDir++
+ if foundWorkDir > 1 {
+ return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+ }
+ finalOpts = append(finalOpts, opt)
+ continue
+ }
+
switch opt {
case "noexec", "exec":
foundExec++
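The resulting behavior, sketched as calls (paths are illustrative): one upperdir and one workdir are accepted per overlay mount, and a duplicate of either is rejected:

    opts := []string{"upperdir=/tmp/upper", "workdir=/tmp/work", "ro"}
    if _, err := parse.ValidateVolumeOpts(opts); err != nil {
        // not reached: a single upperdir plus a single workdir is valid
    }
    if _, err := parse.ValidateVolumeOpts(append(opts, "workdir=/tmp/w2")); err != nil {
        // reached: only one workdir may be specified per overlay
    }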
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 97dd8ea50..ebeef2f2d 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.37.0+dev
+1.38.0
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 96ca1f0b2..e7951ea77 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -3,22 +3,22 @@ go 1.14
module github.com/containers/storage
require (
- github.com/BurntSushi/toml v0.4.1
+ github.com/BurntSushi/toml v1.0.0
github.com/Microsoft/go-winio v0.5.1
- github.com/Microsoft/hcsshim v0.9.1
+ github.com/Microsoft/hcsshim v0.9.2
github.com/containerd/stargz-snapshotter/estargz v0.10.1
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.13.6
+ github.com/klauspost/compress v1.14.1
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.5.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.3
+ github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.0
github.com/pkg/errors v0.9.1
@@ -29,6 +29,6 @@ require (
github.com/ulikunitz/xz v0.5.10
github.com/vbatts/tar-split v0.11.2
golang.org/x/net v0.0.0-20210825183410-e898025ed96a
- golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index c7262fe7a..9e55d7c0f 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -36,8 +36,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
-github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -98,6 +98,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -106,6 +107,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -131,6 +133,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -296,6 +299,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -420,8 +424,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -515,8 +520,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -578,6 +583,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -839,8 +845,11 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
new file mode 100644
index 000000000..a931fb5d1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -0,0 +1,630 @@
+package chunked
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ storage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ jsoniter "github.com/json-iterator/go"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ cacheKey = "chunked-manifest-cache"
+ cacheVersion = 1
+)
+
+type metadata struct {
+ tagLen int
+ digestLen int
+ tags []byte
+ vdata []byte
+}
+
+type layer struct {
+ id string
+ metadata *metadata
+ target string
+}
+
+type layersCache struct {
+ layers []layer
+ refs int
+ store storage.Store
+ mutex sync.RWMutex
+ created time.Time
+}
+
+var cacheMutex sync.Mutex
+var cache *layersCache
+
+func (c *layersCache) release() {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
+ c.refs--
+ if c.refs == 0 {
+ cache = nil
+ }
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+ if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+ cache.refs++
+ return cache
+ }
+ cache = &layersCache{
+ store: store,
+ refs: 1,
+ created: time.Now(),
+ }
+ return cache
+}
+
+func getLayersCache(store storage.Store) (*layersCache, error) {
+ c := getLayersCacheRef(store)
+
+ if err := c.load(); err != nil {
+ c.release()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (c *layersCache) load() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ allLayers, err := c.store.Layers()
+ if err != nil {
+ return err
+ }
+ existingLayers := make(map[string]string)
+ for _, r := range c.layers {
+ existingLayers[r.id] = r.target
+ }
+
+ currentLayers := make(map[string]string)
+ for _, r := range allLayers {
+ currentLayers[r.ID] = r.ID
+ if _, found := existingLayers[r.ID]; found {
+ continue
+ }
+
+ bigData, err := c.store.LayerBigData(r.ID, cacheKey)
+ if err != nil {
+ if errors.Cause(err) == os.ErrNotExist {
+ continue
+ }
+ return err
+ }
+ defer bigData.Close()
+
+ metadata, err := readMetadataFromCache(bigData)
+ if err != nil {
+ logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
+ }
+
+ if metadata != nil {
+ c.addLayer(r.ID, metadata)
+ continue
+ }
+
+ manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
+ if err != nil {
+ continue
+ }
+ defer manifestReader.Close()
+ manifest, err := ioutil.ReadAll(manifestReader)
+ if err != nil {
+ return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+ }
+
+ metadata, err = writeCache(manifest, r.ID, c.store)
+ if err == nil {
+ c.addLayer(r.ID, metadata)
+ }
+ }
+
+ var newLayers []layer
+ for _, l := range c.layers {
+ if _, found := currentLayers[l.id]; found {
+ newLayers = append(newLayers, l)
+ }
+ }
+ c.layers = newLayers
+
+ return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+ digester := digest.Canonical.Digester()
+
+ modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+ hash := digester.Hash()
+
+ if _, err := hash.Write([]byte(f.Digest)); err != nil {
+ return "", err
+ }
+
+ if _, err := hash.Write([]byte(modeString)); err != nil {
+ return "", err
+ }
+
+ if len(f.Xattrs) > 0 {
+ keys := make([]string, 0, len(f.Xattrs))
+ for k := range f.Xattrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if _, err := hash.Write([]byte(k)); err != nil {
+ return "", err
+ }
+ if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+ return "", err
+ }
+ }
+ }
+ return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+ return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
+// The [OFFSET; LEN] pair points into the variable length data where the file locations
+// are stored. $DIGEST has length digestLen stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+ return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+ // SetLayerBigData stores a (possibly large) chunk of named data
+ SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests, each mapped to the offset and length of the entry that records the file path and offset.
+// The same cache is used to look up files, chunks and candidates for deduplication with hard links.
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
+ var vdata bytes.Buffer
+ tagLen := 0
+ digestLen := 0
+ var tagsBuffer bytes.Buffer
+
+ toc, err := prepareMetadata(manifest)
+ if err != nil {
+ return nil, err
+ }
+
+ var tags []string
+ for _, k := range toc {
+ if k.Digest != "" {
+ location := generateFileLocation(k.Name, 0)
+
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+
+ d := generateTag(k.Digest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ fp, err := calculateHardLinkFingerprint(k)
+ if err != nil {
+ return nil, err
+ }
+ d = generateTag(fp, off, l)
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+
+ digestLen = len(k.Digest)
+ }
+ if k.ChunkDigest != "" {
+ location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+ d := generateTag(k.ChunkDigest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+ digestLen = len(k.ChunkDigest)
+ }
+ }
+
+ sort.Strings(tags)
+
+ for _, t := range tags {
+ if _, err := tagsBuffer.Write([]byte(t)); err != nil {
+ return nil, err
+ }
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ errChan := make(chan error, 1)
+ go func() {
+ defer pipeWriter.Close()
+ defer close(errChan)
+
+ // version
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a tag
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a digest
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // vdata length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags
+ if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ // variable length data
+ if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ errChan <- nil
+ }()
+ defer pipeReader.Close()
+
+ counter := ioutils.NewWriteCounter(ioutil.Discard)
+
+ r := io.TeeReader(pipeReader, counter)
+
+ if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+ return nil, err
+ }
+
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+ return &metadata{
+ digestLen: digestLen,
+ tagLen: tagLen,
+ tags: tagsBuffer.Bytes(),
+ vdata: vdata.Bytes(),
+ }, nil
+}
+
+func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
+ var version, tagLen, digestLen, tagsLen, vdataLen uint64
+ if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
+ return nil, err
+ }
+ if version != cacheVersion {
+ return nil, nil
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
+ return nil, err
+ }
+
+ tags := make([]byte, tagsLen)
+ if _, err := io.ReadFull(bigData, tags); err != nil {
+ return nil, err
+ }
+
+ vdata := make([]byte, vdataLen)
+ if _, err := io.ReadFull(bigData, vdata); err != nil {
+ return nil, err
+ }
+
+ return &metadata{
+ tagLen: int(tagLen),
+ digestLen: int(digestLen),
+ tags: tags,
+ vdata: vdata,
+ }, nil
+}
+
+func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
+ toc, err := unmarshalToc(manifest)
+ if err != nil {
+ // ignore errors here. They might be caused by a different manifest format.
+ return nil, nil
+ }
+
+ var r []*internal.FileMetadata
+ chunkSeen := make(map[string]bool)
+ for i := range toc.Entries {
+ d := toc.Entries[i].Digest
+ if d != "" {
+ r = append(r, &toc.Entries[i])
+ continue
+ }
+
+ // chunks do not use hard link dedup so keeping just one candidate is enough
+ cd := toc.Entries[i].ChunkDigest
+ if cd != "" && !chunkSeen[cd] {
+ r = append(r, &toc.Entries[i])
+ chunkSeen[cd] = true
+ }
+ }
+ return r, nil
+}
+
+func (c *layersCache) addLayer(id string, metadata *metadata) error {
+ target, err := c.store.DifferTarget(id)
+ if err != nil {
+ return fmt.Errorf("get checkout directory layer %q: %w", id, err)
+ }
+
+ l := layer{
+ id: id,
+ metadata: metadata,
+ target: target,
+ }
+ c.layers = append(c.layers, l)
+ return nil
+}
+
+func byteSliceAsString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
+ if len(digest) != metadata.digestLen {
+ return "", 0, 0
+ }
+
+ nElements := len(metadata.tags) / metadata.tagLen
+
+ i := sort.Search(nElements, func(i int) bool {
+ d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
+ return strings.Compare(d, digest) >= 0
+ })
+ if i < nElements {
+ d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
+ if digest == d {
+ startOff := i*metadata.tagLen + metadata.digestLen
+ parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
+ off, _ := strconv.ParseInt(parts[0], 10, 64)
+ len, _ := strconv.ParseInt(parts[1], 10, 64)
+ return digest, uint64(off), uint64(len)
+ }
+ }
+ return "", 0, 0
+}
+
+func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
+ if digest == "" {
+ return "", "", -1, nil
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ for _, layer := range c.layers {
+ digest, off, len := findTag(digest, layer.metadata)
+ if digest != "" {
+ position := string(layer.metadata.vdata[off : off+len])
+ parts := strings.SplitN(position, "@", 2)
+ offFile, _ := strconv.ParseInt(parts[0], 10, 64)
+ return layer.target, parts[1], offFile, nil
+ }
+ }
+
+ return "", "", -1, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// file is the file to look for.
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
+ digest := file.Digest
+ if useHardLinks {
+ var err error
+ digest, err = calculateHardLinkFingerprint(file)
+ if err != nil {
+ return "", "", err
+ }
+ }
+ target, name, off, err := c.findDigestInternal(digest)
+ if off == 0 {
+ return target, name, err
+ }
+ return "", "", nil
+}
+
+func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+ return c.findDigestInternal(chunk.ChunkDigest)
+}
+
+func unmarshalToc(manifest []byte) (*internal.TOC, error) {
+ var buf bytes.Buffer
+ count := 0
+ var toc internal.TOC
+
+ iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
+ count += len(iter.ReadStringAsSlice())
+ case "xattrs":
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ count += len(iter.ReadStringAsSlice())
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ }
+ break
+ }
+
+ buf.Grow(count)
+
+ getString := func(b []byte) string {
+ from := buf.Len()
+ buf.Write(b)
+ to := buf.Len()
+ return byteSliceAsString(buf.Bytes()[from:to])
+ }
+
+ iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field == "version" {
+ toc.Version = iter.ReadInt()
+ continue
+ }
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ var m internal.FileMetadata
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type":
+ m.Type = getString(iter.ReadStringAsSlice())
+ case "name":
+ m.Name = getString(iter.ReadStringAsSlice())
+ case "linkName":
+ m.Linkname = getString(iter.ReadStringAsSlice())
+ case "mode":
+ m.Mode = iter.ReadInt64()
+ case "size":
+ m.Size = iter.ReadInt64()
+ case "UID":
+ m.UID = iter.ReadInt()
+ case "GID":
+ m.GID = iter.ReadInt()
+ case "ModTime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ModTime = &time
+ case "accesstime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.AccessTime = &time
+ case "changetime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ChangeTime = &time
+ case "devMajor":
+ m.Devmajor = iter.ReadInt64()
+ case "devMinor":
+ m.Devminor = iter.ReadInt64()
+ case "digest":
+ m.Digest = getString(iter.ReadStringAsSlice())
+ case "offset":
+ m.Offset = iter.ReadInt64()
+ case "endOffset":
+ m.EndOffset = iter.ReadInt64()
+ case "chunkSize":
+ m.ChunkSize = iter.ReadInt64()
+ case "chunkOffset":
+ m.ChunkOffset = iter.ReadInt64()
+ case "chunkDigest":
+ m.ChunkDigest = getString(iter.ReadStringAsSlice())
+ case "chunkType":
+ m.ChunkType = getString(iter.ReadStringAsSlice())
+ case "xattrs":
+ m.Xattrs = make(map[string]string)
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ value := iter.ReadStringAsSlice()
+ m.Xattrs[key] = getString(value)
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ toc.Entries = append(toc.Entries, m)
+ }
+ break
+ }
+ toc.StringsBuf = buf
+ return &toc, nil
+}
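The cache blob written above is five little-endian uint64s (version, tag length, digest length, len(tags), len(vdata)) followed by the two buffers. Every tag has the same width, so the sorted tag array can be binary-searched with plain string comparison, as findTag does. A small sketch of building and splitting one tag; the digest value is illustrative:

    d := "sha256:aaaa"                             // illustrative digest
    tag := fmt.Sprintf("%s%.20d@%.20d", d, 42, 17) // same layout as generateTag
    off, _ := strconv.ParseUint(tag[len(d):len(d)+20], 10, 64) // 42: offset into vdata
    length, _ := strconv.ParseUint(tag[len(d)+21:], 10, 64)    // 17: length of the entry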
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584a..aeb7cfd4f 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,6 +5,7 @@ package compressor
// larger software like the graph drivers.
import (
+ "bufio"
"encoding/base64"
"io"
"io/ioutil"
@@ -15,6 +16,189 @@ import (
"github.com/vbatts/tar-split/archive/tar"
)
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+ reader *bufio.Reader
+ fileOff int64
+ zeros int64
+ from int64
+ threshold int64
+
+ state int
+}
+
+const (
+ holesFinderStateRead = iota
+ holesFinderStateAccumulate
+ holesFinderStateFound
+ holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+ for {
+ switch f.state {
+ // reading the file stream
+ case holesFinderStateRead:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ return 0, b, err
+ }
+
+ if b != 0 {
+ return 0, b, err
+ }
+
+ f.zeros = 1
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ } else {
+ f.state = holesFinderStateAccumulate
+ }
+ // accumulating zeros, but still didn't reach the threshold
+ case holesFinderStateAccumulate:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ continue
+ }
+ return 0, b, err
+ }
+
+ if b == 0 {
+ f.zeros++
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ }
+ } else {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+ }
+ // found a hole. Number of zeros >= threshold
+ case holesFinderStateFound:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ }
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ if b != 0 {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ f.zeros++
+ // reached EOF. Flush pending zeros if any.
+ case holesFinderStateEOF:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ return 0, 0, io.EOF
+ }
+ }
+}
+
+type rollingChecksumReader struct {
+ reader *holesFinder
+ closed bool
+ rollsum *RollSum
+ pendingHole int64
+
+ // WrittenOut is the total number of bytes of the uncompressed
+ // stream handed back to the caller so far.
+ WrittenOut int64
+
+ // IsLastChunkZeros tells whether the last generated
+ // chunk is a hole (made of consecutive zeros). If it
+ // is false, then the last chunk is a data chunk
+ // generated by the rolling checksum.
+ IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+ rc.IsLastChunkZeros = false
+
+ if rc.pendingHole > 0 {
+ toCopy := int64(len(b))
+ if rc.pendingHole < toCopy {
+ toCopy = rc.pendingHole
+ }
+ rc.pendingHole -= toCopy
+ for i := int64(0); i < toCopy; i++ {
+ b[i] = 0
+ }
+
+ rc.WrittenOut += toCopy
+
+ rc.IsLastChunkZeros = true
+
+ // if there are no other zeros left, terminate the chunk
+ return rc.pendingHole == 0, int(toCopy), nil
+ }
+
+ if rc.closed {
+ return false, 0, io.EOF
+ }
+
+ for i := 0; i < len(b); i++ {
+ holeLen, n, err := rc.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ rc.closed = true
+ if i == 0 {
+ return false, 0, err
+ }
+ return false, i, nil
+ }
+ // Report any other error type
+ return false, -1, err
+ }
+ if holeLen > 0 {
+ for j := int64(0); j < holeLen; j++ {
+ rc.rollsum.Roll(0)
+ }
+ rc.pendingHole = holeLen
+ return true, i, nil
+ }
+ b[i] = n
+ rc.WrittenOut++
+ rc.rollsum.Roll(n)
+ if rc.rollsum.OnSplitWithBits(RollsumBits) {
+ return true, i + 1, nil
+ }
+ }
+ return false, len(b), nil
+}
+
+type chunk struct {
+ ChunkOffset int64
+ Offset int64
+ Checksum string
+ ChunkSize int64
+ ChunkType string
+}
+
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
@@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if _, err := zstdWriter.Write(rawBytes); err != nil {
return err
}
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+ payloadDigester := digest.Canonical.Digester()
+ chunkDigester := digest.Canonical.Digester()
// Now handle the payload, if any
- var startOffset, endOffset int64
+ startOffset := int64(0)
+ lastOffset := int64(0)
+ lastChunkOffset := int64(0)
+
checksum := ""
+
+ chunks := []chunk{}
+
+ hf := &holesFinder{
+ threshold: holesThreshold,
+ reader: bufio.NewReader(tr),
+ }
+
+ rcReader := &rollingChecksumReader{
+ reader: hf,
+ rollsum: NewRollSum(),
+ }
+
+ payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
for {
- read, errRead := tr.Read(buf)
+ mustSplit, read, errRead := rcReader.Read(buf)
if errRead != nil && errRead != io.EOF {
return errRead
}
-
- // restart the compression only if there is
- // a payload.
+ // restart the compression only if there is a payload.
if read > 0 {
if startOffset == 0 {
startOffset, err = restartCompression()
if err != nil {
return err
}
+ lastOffset = startOffset
+ }
+
+ if _, err := payloadDest.Write(buf[:read]); err != nil {
+ return err
}
- _, err := payloadDest.Write(buf[:read])
+ }
+ if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+ off, err := restartCompression()
if err != nil {
return err
}
+
+ chunkSize := rcReader.WrittenOut - lastChunkOffset
+ if chunkSize > 0 {
+ chunkType := internal.ChunkTypeData
+ if rcReader.IsLastChunkZeros {
+ chunkType = internal.ChunkTypeZeros
+ }
+
+ chunks = append(chunks, chunk{
+ ChunkOffset: lastChunkOffset,
+ Offset: lastOffset,
+ Checksum: chunkDigester.Digest().String(),
+ ChunkSize: chunkSize,
+ ChunkType: chunkType,
+ })
+ }
+
+ lastOffset = off
+ lastChunkOffset = rcReader.WrittenOut
+ chunkDigester = digest.Canonical.Digester()
+ payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
}
if errRead == io.EOF {
if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
checksum = payloadDigester.Digest().String()
}
break
@@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
for k, v := range hdr.Xattrs {
xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
}
- m := internal.FileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
+ entries := []internal.FileMetadata{
+ {
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: &hdr.ModTime,
+ AccessTime: &hdr.AccessTime,
+ ChangeTime: &hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: lastOffset,
+ },
+ }
+ for i := 1; i < len(chunks); i++ {
+ entries = append(entries, internal.FileMetadata{
+ Type: internal.TypeChunk,
+ Name: hdr.Name,
+ ChunkOffset: chunks[i].ChunkOffset,
+ })
+ }
+ if len(chunks) > 1 {
+ for i := range chunks {
+ entries[i].ChunkSize = chunks[i].ChunkSize
+ entries[i].Offset = chunks[i].Offset
+ entries[i].ChunkDigest = chunks[i].Checksum
+ entries[i].ChunkType = chunks[i].ChunkType
+ }
+ }
+ metadata = append(metadata, entries...)
}
rawBytes := tr.RawBytes()
@@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
if level == nil {
- l := 3
+ l := 10
level = &l
}
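
Taken together, the chunking changes encode one regular file that was split into N chunks as a single TypeReg entry followed by N-1 TypeChunk entries, with each entry's chunk offset, size, digest, and type filled in afterwards. A sketch of the resulting metadata for a hypothetical two-chunk file (paths, offsets, and digests are illustrative):

    entries := []internal.FileMetadata{
        {
            Type:        internal.TypeReg,
            Name:        "usr/bin/example",
            Size:        16384, // uncompressed size of the whole file
            Digest:      "sha256:<whole-file digest>",
            Offset:      100, // compressed offset of chunk 0
            EndOffset:   900, // compressed end of the last chunk
            ChunkOffset: 0,
            ChunkSize:   8192,
            ChunkDigest: "sha256:<chunk 0 digest>",
            ChunkType:   internal.ChunkTypeData,
        },
        {
            Type:        internal.TypeChunk,
            Name:        "usr/bin/example",
            Offset:      500,  // compressed offset of chunk 1
            ChunkOffset: 8192, // position within the uncompressed file
            ChunkSize:   8192,
            ChunkDigest: "sha256:<chunk 1 digest>",
            ChunkType:   internal.ChunkTypeZeros,
        },
    }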
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
new file mode 100644
index 000000000..f4dfad822
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2011 The Perkeep Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rollsum implements rolling checksums similar to apenwarr's bup, which
+// is similar to librsync.
+//
+// The bup project is at https://github.com/apenwarr/bup and its splitting in
+// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
+package compressor
+
+import (
+ "math/bits"
+)
+
+const windowSize = 64 // Roll assumes windowSize is a power of 2
+const charOffset = 31
+
+const blobBits = 13
+const blobSize = 1 << blobBits // 8k
+
+type RollSum struct {
+ s1, s2 uint32
+ window [windowSize]uint8
+ wofs int
+}
+
+func NewRollSum() *RollSum {
+ return &RollSum{
+ s1: windowSize * charOffset,
+ s2: windowSize * (windowSize - 1) * charOffset,
+ }
+}
+
+func (rs *RollSum) add(drop, add uint32) {
+ s1 := rs.s1 + add - drop
+ rs.s1 = s1
+ rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
+}
+
+// Roll adds ch to the rolling sum.
+func (rs *RollSum) Roll(ch byte) {
+ wp := &rs.window[rs.wofs]
+ rs.add(uint32(*wp), uint32(ch))
+ *wp = ch
+ rs.wofs = (rs.wofs + 1) & (windowSize - 1)
+}
+
+// OnSplit reports whether at least 13 consecutive trailing bits of
+// the current checksum are set the same way.
+func (rs *RollSum) OnSplit() bool {
+ return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
+}
+
+// OnSplitWithBits reports whether at least n consecutive trailing bits
+// of the current checksum are set the same way.
+func (rs *RollSum) OnSplitWithBits(n uint32) bool {
+ mask := (uint32(1) << n) - 1
+ return rs.s2&mask == (^uint32(0))&mask
+}
+
+func (rs *RollSum) Bits() int {
+ rsum := rs.Digest() >> (blobBits + 1)
+ return blobBits + bits.TrailingZeros32(^rsum)
+}
+
+func (rs *RollSum) Digest() uint32 {
+ return (rs.s1 << 16) | (rs.s2 & 0xffff)
+}
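
The splitter is driven one byte at a time: every input byte is rolled through the 64-byte window, and a chunk boundary is declared whenever the low n bits of the checksum are all set, which yields an expected chunk size of about 2^n bytes. A minimal usage sketch (splitPoints is a hypothetical helper; the compressor itself passes its RollsumBits constant, defined elsewhere in the package):

    func splitPoints(data []byte, nBits uint32) []int {
        rs := NewRollSum()
        var cuts []int
        for i, b := range data {
            rs.Roll(b)
            // all of the low nBits bits set -> boundary after this byte
            if rs.OnSplitWithBits(nBits) {
                cuts = append(cuts, i+1)
            }
        }
        return cuts
    }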
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index c91c43d85..3bb5286d9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -8,11 +8,11 @@ import (
"archive/tar"
"bytes"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
"time"
+ jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
)
@@ -20,6 +20,9 @@ import (
type TOC struct {
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
+
+ // internal: used by unmarshalToc
+ StringsBuf bytes.Buffer `json:"-"`
}
type FileMetadata struct {
@@ -27,26 +30,34 @@ type FileMetadata struct {
Name string `json:"name"`
Linkname string `json:"linkName,omitempty"`
Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
+ Size int64 `json:"size,omitempty"`
+ UID int `json:"uid,omitempty"`
+ GID int `json:"gid,omitempty"`
+ ModTime *time.Time `json:"modtime,omitempty"`
+ AccessTime *time.Time `json:"accesstime,omitempty"`
+ ChangeTime *time.Time `json:"changetime,omitempty"`
+ Devmajor int64 `json:"devMajor,omitempty"`
+ Devminor int64 `json:"devMinor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"`
Digest string `json:"digest,omitempty"`
Offset int64 `json:"offset,omitempty"`
EndOffset int64 `json:"endOffset,omitempty"`
- // Currently chunking is not supported.
ChunkSize int64 `json:"chunkSize,omitempty"`
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkDigest string `json:"chunkDigest,omitempty"`
+ ChunkType string `json:"chunkType,omitempty"`
+
+ // internal: computed by mergeTOCEntries.
+ Chunks []*FileMetadata `json:"-"`
}
const (
+ ChunkTypeData = ""
+ ChunkTypeZeros = "zeros"
+)
+
+const (
TypeReg = "reg"
TypeChunk = "chunk"
TypeLink = "hardlink"
@@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Entries: metadata,
}
+ var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Generate the manifest
manifest, err := json.Marshal(toc)
if err != nil {
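
ConfigCompatibleWithStandardLibrary makes jsoniter a drop-in replacement for encoding/json: declaring a package-level `json` variable keeps every call site unchanged while decoding large TOCs noticeably faster. The pattern in isolation, as a hedged sketch (decodeTOC is a hypothetical helper):

    import jsoniter "github.com/json-iterator/go"

    var json = jsoniter.ConfigCompatibleWithStandardLibrary

    func decodeTOC(manifest []byte) (*TOC, error) {
        var toc TOC
        // the wire format is identical to encoding/json,
        // so existing manifests parse unchanged
        if err := json.Unmarshal(manifest, &toc); err != nil {
            return nil, err
        }
        return &toc, nil
    }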
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 52d21d689..92b15c2bf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -4,8 +4,8 @@ import (
archivetar "archive/tar"
"context"
"encoding/base64"
- "encoding/json"
"fmt"
+ "hash"
"io"
"io/ioutil"
"os"
@@ -13,6 +13,7 @@ import (
"reflect"
"sort"
"strings"
+ "sync"
"sync/atomic"
"syscall"
"time"
@@ -43,28 +44,35 @@ const (
bigDataKey = "zstd-chunked-manifest"
fileTypeZstdChunked = iota
- fileTypeEstargz = iota
+ fileTypeEstargz
+ fileTypeNoCompression
+ fileTypeHole
+
+ copyGoRoutines = 32
)
type compressedFileType int
type chunkedDiffer struct {
- stream ImageSourceSeekable
- manifest []byte
- layersMetadata map[string][]internal.FileMetadata
- layersTarget map[string]string
- tocOffset int64
- fileType compressedFileType
+ stream ImageSourceSeekable
+ manifest []byte
+ layersCache *layersCache
+ tocOffset int64
+ fileType compressedFileType
+
+ copyBuffer []byte
gzipReader *pgzip.Reader
+ zstdReader *zstd.Decoder
+ rawReader io.Reader
}
var xattrsToIgnore = map[string]interface{}{
"security.selinux": true,
}
-func timeToTimespec(time time.Time) (ts unix.Timespec) {
- if time.IsZero() {
+func timeToTimespec(time *time.Time) (ts unix.Timespec) {
+ if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
@@ -128,54 +136,6 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
return dstFile, st.Size(), nil
}
-func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata {
- maps := make(map[string]map[string][]*internal.FileMetadata)
-
- for layerID, v := range layersMetadata {
- r := make(map[string][]*internal.FileMetadata)
- for i := range v {
- if v[i].Digest != "" {
- r[v[i].Digest] = append(r[v[i].Digest], &v[i])
- }
- }
- maps[layerID] = r
- }
- return maps
-}
-
-func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) {
- allLayers, err := store.Layers()
- if err != nil {
- return nil, nil, err
- }
-
- layersMetadata := make(map[string][]internal.FileMetadata)
- layersTarget := make(map[string]string)
- for _, r := range allLayers {
- manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
- if err != nil {
- continue
- }
- defer manifestReader.Close()
- manifest, err := ioutil.ReadAll(manifestReader)
- if err != nil {
- return nil, nil, fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
- }
- var toc internal.TOC
- if err := json.Unmarshal(manifest, &toc); err != nil {
- continue
- }
- layersMetadata[r.ID] = toc.Entries
- target, err := store.DifferTarget(r.ID)
- if err != nil {
- return nil, nil, fmt.Errorf("get checkout directory layer %q: %w", r.ID, err)
- }
- layersTarget[r.ID] = target
- }
-
- return layersMetadata, layersTarget, nil
-}
-
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@@ -192,18 +152,18 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeZstdChunked,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeZstdChunked,
}, nil
}
@@ -212,37 +172,41 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeEstargz,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeEstargz,
}, nil
}
+func makeCopyBuffer() []byte {
+ return make([]byte, 2<<20)
+}
+
// copyFileFromOtherLayer copies a file from another layer
// file is the file to look for.
// source is the path to the source layer checkout.
-// otherFile contains the metadata for the file.
+// name is the path to the file to copy in source.
// dirfd is an open file descriptor to the destination root directory.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, fmt.Errorf("open source file %q: %w", source, err)
+ return false, nil, 0, fmt.Errorf("open source file: %w", err)
}
defer unix.Close(srcDirfd)
- srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+ srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, fmt.Errorf("open source file %q under target rootfs: %w", otherFile.Name, err)
+ return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
}
defer srcFile.Close()
@@ -308,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
return canDedupMetadataWithHardLink(file, &otherFile)
}
-// findFileInOtherLayers finds the specified file in other layers.
-// file is the file to look for.
-// dirfd is an open file descriptor to the checkout root directory.
-// layersMetadata contains the metadata for each layer in the storage.
-// layersTarget maps each layer to its checkout on disk.
-// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
- // this is ugly, needs to be indexed
- for layerID, checksums := range layersMetadata {
- source, ok := layersTarget[layerID]
- if !ok {
- continue
- }
- files, found := checksums[file.Digest]
- if !found {
- continue
- }
- for _, candidate := range files {
- // check if it is a valid candidate to dedup file
- if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) {
- continue
- }
-
- found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks)
- if found && err == nil {
- return found, dstFile, written, err
- }
- }
- }
- // If hard links deduplication was used and it has failed, try again without hard links.
- if useHardLinks {
- return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false)
- }
- return false, nil, 0, nil
-}
-
-func getFileDigest(f *os.File) (digest.Digest, error) {
+func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) {
digester := digest.Canonical.Digester()
- if _, err := io.Copy(digester.Hash(), f); err != nil {
+ if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
return "", err
}
return digester.Digest(), nil
@@ -408,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
// file is the file to look for.
// dirfd is an open fd to the destination checkout.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
@@ -437,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
return false, nil, 0, err
}
- checksum, err := getFileDigest(f)
+ checksum, err := getFileDigest(f, buf)
if err != nil {
return false, nil, 0, err
}
@@ -459,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
dstFile.Close()
return false, nil, 0, err
}
- checksum, err = getFileDigest(f)
+ checksum, err = getFileDigest(f, buf)
if err != nil {
dstFile.Close()
return false, nil, 0, err
@@ -471,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
return true, dstFile, written, nil
}
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ target, name, err := cache.findFileInOtherLayers(file, useHardLinks)
+ if err != nil || name == "" {
+ return false, nil, 0, err
+ }
+ return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks)
+}
+
func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error {
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
return nil
@@ -497,18 +438,46 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
-type missingFile struct {
- File *internal.FileMetadata
+type originFile struct {
+ Root string
+ Path string
+ Offset int64
+}
+
+type missingFileChunk struct {
Gap int64
+ Hole bool
+
+ File *internal.FileMetadata
+
+ CompressedSize int64
+ UncompressedSize int64
}
-func (m missingFile) Length() int64 {
- return m.File.EndOffset - m.File.Offset
+type missingPart struct {
+ Hole bool
+ SourceChunk *ImageSourceChunk
+ OriginFile *originFile
+ Chunks []missingFileChunk
}
-type missingChunk struct {
- RawChunk ImageSourceChunk
- Files []missingFile
+func (o *originFile) OpenFile() (io.ReadCloser, error) {
+ srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file: %w", err)
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file under target rootfs: %w", err)
+ }
+
+ if _, err := srcFile.Seek(o.Offset, 0); err != nil {
+ srcFile.Close()
+ return nil, err
+ }
+ return srcFile, nil
}
// setFileAttrs sets the file attributes for file given metadata
@@ -711,7 +680,7 @@ func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (
newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0)
if err2 == nil {
defer newDirfd.Close()
- fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
+ fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
@@ -755,159 +724,367 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil
return nil, err
}
-func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
- file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
- if err != nil {
- return err
+func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
+ switch {
+ case partCompression == fileTypeHole:
+ // The entire part is a hole. There is no need to read from a file.
+ c.rawReader = nil
+ return fileTypeHole, nil
+ case mf.Hole:
+ // Only the missing chunk in the requested part refers to a hole.
+ // The received data must be discarded.
+ limitReader := io.LimitReader(from, mf.CompressedSize)
+ _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
+ return fileTypeHole, err
+ case partCompression == fileTypeZstdChunked:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.zstdReader == nil {
+ r, err := zstd.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.zstdReader = r
+ } else {
+ if err := c.zstdReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeEstargz:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.gzipReader == nil {
+ r, err := pgzip.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.gzipReader = r
+ } else {
+ if err := c.gzipReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeNoCompression:
+ c.rawReader = io.LimitReader(from, mf.UncompressedSize)
+ default:
+ return partCompression, fmt.Errorf("unknown file type %q", c.fileType)
}
- defer func() {
- err2 := file.Close()
- if err == nil {
- err = err2
+ return partCompression, nil
+}
+
+// hashHole writes size zeros to the specified hasher.
+func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
+ count := int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ for i := int64(0); i < count; i++ {
+ copyBuffer[i] = 0
+ }
+ for size > 0 {
+ count = int64(len(copyBuffer))
+ if size < count {
+ count = size
}
- }()
+ if _, err := h.Write(copyBuffer[:count]); err != nil {
+ return err
+ }
+ size -= count
+ }
+ return nil
+}
- digester := digest.Canonical.Digester()
- checksum := digester.Hash()
- to := io.MultiWriter(file, checksum)
+// appendHole creates a hole with the specified size at the open fd.
+func appendHole(fd int, size int64) error {
+ off, err := unix.Seek(fd, size, unix.SEEK_CUR)
+ if err != nil {
+ return err
+ }
+ // Make sure the file size is changed: this might be the last hole, with no other data written afterwards.
+ if err := unix.Ftruncate(fd, off); err != nil {
+ return err
+ }
+ return nil
+}
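
appendHole leans on sparse-file semantics: seeking past the end of the written data and truncating the file up to the new offset leaves a hole that reads back as zeros without allocating blocks. The same technique in a standalone sketch (writeSparse is hypothetical; imports of os and golang.org/x/sys/unix are assumed):

    // writeSparse writes a few bytes followed by a 1 MiB trailing hole.
    func writeSparse(path string) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        if _, err := f.Write([]byte("data")); err != nil {
            return err
        }
        // move the offset past EOF without writing anything
        off, err := unix.Seek(int(f.Fd()), 1<<20, unix.SEEK_CUR)
        if err != nil {
            return err
        }
        // grow the file size so the trailing hole is kept
        return unix.Ftruncate(int(f.Fd()), off)
    }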
- switch c.fileType {
+func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
+ switch compression {
case fileTypeZstdChunked:
- z, err := zstd.NewReader(reader)
- if err != nil {
+ defer c.zstdReader.Reset(nil)
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
return err
}
- defer z.Close()
-
- if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil {
+ case fileTypeEstargz:
+ defer c.gzipReader.Close()
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil {
return err
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
+ case fileTypeNoCompression:
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil {
return err
}
- case fileTypeEstargz:
- if c.gzipReader == nil {
- r, err := pgzip.NewReader(reader)
- if err != nil {
- return err
- }
- c.gzipReader = r
- } else {
- if err := c.gzipReader.Reset(reader); err != nil {
- return err
- }
- }
- defer c.gzipReader.Close()
-
- if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil {
+ case fileTypeHole:
+ if err := appendHole(int(destFile.file.Fd()), size); err != nil {
return err
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
+ if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil {
return err
}
default:
return fmt.Errorf("unknown file type %q", c.fileType)
}
+ return nil
+}
+
+type destinationFile struct {
+ dirfd int
+ file *os.File
+ digester digest.Digester
+ hash hash.Hash
+ to io.Writer
+ metadata *internal.FileMetadata
+ options *archive.TarOptions
+}
+
+func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) {
+ file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ digester := digest.Canonical.Digester()
+ hash := digester.Hash()
+ to := io.MultiWriter(file, hash)
+
+ return &destinationFile{
+ file: file,
+ digester: digester,
+ hash: hash,
+ to: to,
+ metadata: metadata,
+ options: options,
+ dirfd: dirfd,
+ }, nil
+}
- manifestChecksum, err := digest.Parse(metadata.Digest)
+func (d *destinationFile) Close() error {
+ manifestChecksum, err := digest.Parse(d.metadata.Digest)
if err != nil {
return err
}
- if digester.Digest() != manifestChecksum {
- return fmt.Errorf("checksum mismatch for %q", dest)
+ if d.digester.Digest() != manifestChecksum {
+ return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
}
- return setFileAttrs(dirfd, file, mode, metadata, options, false)
+
+ return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
}
-func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
- for mc := 0; ; mc++ {
- var part io.ReadCloser
- select {
- case p := <-streams:
- part = p
- case err := <-errs:
- return err
- }
- if part == nil {
- if mc == len(missingChunks) {
- break
+func closeDestinationFiles(files chan *destinationFile, errors chan error) {
+ for f := range files {
+ errors <- f.Close()
+ }
+ close(errors)
+}
+
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+ var destFile *destinationFile
+
+ filesToClose := make(chan *destinationFile, 3)
+ closeFilesErrors := make(chan error, 2)
+
+ go closeDestinationFiles(filesToClose, closeFilesErrors)
+ defer func() {
+ close(filesToClose)
+ for e := range closeFilesErrors {
+ if e != nil && Err == nil {
+ Err = e
}
- return errors.Errorf("invalid stream returned")
}
- if mc == len(missingChunks) {
- part.Close()
- return errors.Errorf("too many chunks returned")
+ }()
+
+ for _, missingPart := range missingParts {
+ var part io.ReadCloser
+ partCompression := c.fileType
+ switch {
+ case missingPart.Hole:
+ partCompression = fileTypeHole
+ case missingPart.OriginFile != nil:
+ var err error
+ part, err = missingPart.OriginFile.OpenFile()
+ if err != nil {
+ return err
+ }
+ partCompression = fileTypeNoCompression
+ case missingPart.SourceChunk != nil:
+ select {
+ case p := <-streams:
+ part = p
+ case err := <-errs:
+ return err
+ }
+ if part == nil {
+ return errors.Errorf("invalid stream returned")
+ }
+ default:
+ return errors.Errorf("internal error: missing part has neither a local nor a remote data source")
}
- for _, mf := range missingChunks[mc].Files {
+ for _, mf := range missingPart.Chunks {
if mf.Gap > 0 {
limitReader := io.LimitReader(part, mf.Gap)
- _, err := io.Copy(ioutil.Discard, limitReader)
+ _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
if err != nil {
- part.Close()
- return err
+ Err = err
+ goto exit
}
continue
}
- limitReader := io.LimitReader(part, mf.Length())
+ if mf.File.Name == "" {
+ Err = errors.Errorf("file name empty")
+ goto exit
+ }
- if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
- part.Close()
- return err
+ compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+
+ // Open the new file if it is different from the one that is
+ // already opened
+ if destFile == nil || destFile.metadata.Name != mf.File.Name {
+ var err error
+ if destFile != nil {
+ cleanup:
+ for {
+ select {
+ case err = <-closeFilesErrors:
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ default:
+ break cleanup
+ }
+ }
+ filesToClose <- destFile
+ }
+ destFile, err = openDestinationFile(dirfd, mf.File, options)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ }
+
+ if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+ Err = err
+ goto exit
+ }
+ if c.rawReader != nil {
+ if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
+ Err = err
+ goto exit
+ }
+ }
+ }
+ exit:
+ if part != nil {
+ part.Close()
+ if Err != nil {
+ break
}
}
- part.Close()
}
+
+ if destFile != nil {
+ return destFile.Close()
+ }
+
return nil
}
-func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk {
- if len(missingChunks) <= target {
- return missingChunks
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+ getGap := func(missingParts []missingPart, i int) int {
+ prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+ return int(missingParts[i].SourceChunk.Offset - prev)
+ }
+ getCost := func(missingParts []missingPart, i int) int {
+ cost := getGap(missingParts, i)
+ if missingParts[i-1].OriginFile != nil {
+ cost += int(missingParts[i-1].SourceChunk.Length)
+ }
+ if missingParts[i].OriginFile != nil {
+ cost += int(missingParts[i].SourceChunk.Length)
+ }
+ return cost
+ }
+
+ // simple case: merge chunks from the same file.
+ newMissingParts := missingParts[0:1]
+ prevIndex := 0
+ for i := 1; i < len(missingParts); i++ {
+ gap := getGap(missingParts, i)
+ if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
+ missingParts[i].OriginFile == nil &&
+ !missingParts[prevIndex].Hole && !missingParts[i].Hole &&
+ len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
+ missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
+ missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
+ missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
+ } else {
+ newMissingParts = append(newMissingParts, missingParts[i])
+ prevIndex++
+ }
}
+ missingParts = newMissingParts
- getGap := func(missingChunks []missingChunk, i int) int {
- prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length
- return int(missingChunks[i].RawChunk.Offset - prev)
+ if len(missingParts) <= target {
+ return missingParts
}
// this implementation doesn't account for duplicates, so it could merge
// more than necessary to reach the specified target. Since target itself
// is a heuristic value, it doesn't matter.
- var gaps []int
- for i := 1; i < len(missingChunks); i++ {
- gaps = append(gaps, getGap(missingChunks, i))
+ costs := make([]int, len(missingParts)-1)
+ for i := 1; i < len(missingParts); i++ {
+ costs[i-1] = getCost(missingParts, i)
}
- sort.Ints(gaps)
+ sort.Ints(costs)
- toShrink := len(missingChunks) - target
- targetValue := gaps[toShrink-1]
+ toShrink := len(missingParts) - target
+ if toShrink >= len(costs) {
+ toShrink = len(costs) - 1
+ }
+ targetValue := costs[toShrink]
- newMissingChunks := missingChunks[0:1]
- for i := 1; i < len(missingChunks); i++ {
- gap := getGap(missingChunks, i)
- if gap > targetValue {
- newMissingChunks = append(newMissingChunks, missingChunks[i])
+ newMissingParts = missingParts[0:1]
+ for i := 1; i < len(missingParts); i++ {
+ if getCost(missingParts, i) > targetValue {
+ newMissingParts = append(newMissingParts, missingParts[i])
} else {
- prev := &newMissingChunks[len(newMissingChunks)-1]
- prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
+ gap := getGap(missingParts, i)
+ prev := &newMissingParts[len(newMissingParts)-1]
+ prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ prev.Hole = false
+ prev.OriginFile = nil
if gap > 0 {
- gapFile := missingFile{
+ gapFile := missingFileChunk{
Gap: int64(gap),
}
- prev.Files = append(prev.Files, gapFile)
+ prev.Chunks = append(prev.Chunks, gapFile)
}
- prev.Files = append(prev.Files, missingChunks[i].Files...)
+ prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
}
}
- return newMissingChunks
+ return newMissingParts
}
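
The merge cost is the gap between two consecutive parts plus the length of any part whose local OriginFile would be lost by merging, since a merged part must be fetched entirely from the remote stream. A small worked example with made-up numbers:

    // part A: offset 0,   length 100, OriginFile == nil
    // part B: offset 150, length 100, OriginFile != nil (available locally)
    //
    // gap(A,B)  = 150 - (0 + 100) = 50
    // cost(A,B) = gap + len(B)    = 150  // merging forces B to be downloaded
    //
    // with no local origin on either side, the cost is just the gap: 50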
-func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
+func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk
- for _, c := range missingChunks {
- chunksToRequest = append(chunksToRequest, c.RawChunk)
+ for _, c := range missingParts {
+ if c.OriginFile == nil && !c.Hole {
+ chunksToRequest = append(chunksToRequest, *c.SourceChunk)
+ }
}
// There are some missing files. Prepare a multirange request for the missing chunks.
@@ -921,20 +1098,20 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun
}
if _, ok := err.(ErrBadRequest); ok {
- requested := len(missingChunks)
+ requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
if requested < 64 {
return err
}
// Merge more chunks to request
- missingChunks = mergeMissingChunks(missingChunks, requested/2)
+ missingParts = mergeMissingChunks(missingParts, requested/2)
continue
}
return err
}
- if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
+ if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
return err
}
return nil
@@ -960,7 +1137,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.File
}
}
- file, err := openFileUnderRoot(name, dirfd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
+ file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -1109,7 +1286,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
return def
}
+type findAndCopyFileOptions struct {
+ useHardLinks bool
+ enableHostDedup bool
+ ostreeRepos []string
+ options *archive.TarOptions
+}
+
+func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
+ finalizeFile := func(dstFile *os.File) error {
+ if dstFile != nil {
+ defer dstFile.Close()
+ if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ if copyOptions.enableHostDedup {
+ found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
+ defer c.layersCache.release()
+ defer func() {
+ if c.zstdReader != nil {
+ c.zstdReader.Close()
+ }
+ }()
+
bigData := map[string][]byte{
bigDataKey: c.manifest,
}
@@ -1137,14 +1376,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")
// Generate the manifest
- var toc internal.TOC
- if err := json.Unmarshal(c.manifest, &toc); err != nil {
+ toc, err := unmarshalToc(c.manifest)
+ if err != nil {
return output, err
}
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
- var missingChunks []missingChunk
+ var missingParts []missingPart
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
@@ -1170,13 +1409,57 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
defer unix.Close(dirfd)
- otherLayersCache := prepareOtherLayersCache(c.layersMetadata)
-
// hardlinks can point to missing files. So create them after all files
// are retrieved
var hardLinks []hardLinkToCreate
- missingChunksSize, totalChunksSize := int64(0), int64(0)
+ missingPartsSize, totalChunksSize := int64(0), int64(0)
+
+ copyOptions := findAndCopyFileOptions{
+ useHardLinks: useHardLinks,
+ enableHostDedup: enableHostDedup,
+ ostreeRepos: ostreeRepos,
+ options: options,
+ }
+
+ type copyFileJob struct {
+ njob int
+ index int
+ mode os.FileMode
+ metadata *internal.FileMetadata
+
+ found bool
+ err error
+ }
+
+ var wg sync.WaitGroup
+
+ copyResults := make([]copyFileJob, len(mergedEntries))
+
+ copyFileJobs := make(chan copyFileJob)
+ defer func() {
+ if copyFileJobs != nil {
+ close(copyFileJobs)
+ }
+ wg.Wait()
+ }()
+
+ for i := 0; i < copyGoRoutines; i++ {
+ wg.Add(1)
+ jobs := copyFileJobs
+
+ go func() {
+ defer wg.Done()
+ for job := range jobs {
+ found, err := c.findAndCopyFile(dirfd, job.metadata, &copyOptions, job.mode)
+ job.err = err
+ job.found = found
+ copyResults[job.njob] = job
+ }
+ }()
+ }
+
+ filesToWaitFor := 0
for i, r := range mergedEntries {
if options.ForceMask != nil {
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
@@ -1272,74 +1555,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
totalChunksSize += r.Size
- finalizeFile := func(dstFile *os.File) error {
- if dstFile != nil {
- defer dstFile.Close()
- if err := setFileAttrs(dirfd, dstFile, mode, &r, options, false); err != nil {
- return err
- }
+ if t == tar.TypeReg {
+ index := i
+ njob := filesToWaitFor
+ job := copyFileJob{
+ mode: mode,
+ metadata: &mergedEntries[index],
+ index: index,
+ njob: njob,
}
- return nil
+ copyFileJobs <- job
+ filesToWaitFor++
}
+ }
- found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks)
- if err != nil {
- return output, err
- }
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
- continue
- }
+ close(copyFileJobs)
+ copyFileJobs = nil
- found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks)
- if err != nil {
- return output, err
+ wg.Wait()
+
+ for _, res := range copyResults[:filesToWaitFor] {
+ if res.err != nil {
+ return output, res.err
}
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
+ // the file was already copied to its destination,
+ // so there is nothing left to do.
+ if res.found {
continue
}
- if enableHostDedup {
- found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks)
- if err != nil {
- return output, err
- }
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
- continue
+ r := &mergedEntries[res.index]
+
+ missingPartsSize += r.Size
+
+ remainingSize := r.Size
+
+ // the file is missing, attempt to find individual chunks.
+ for _, chunk := range r.Chunks {
+ compressedSize := int64(chunk.EndOffset - chunk.Offset)
+ size := remainingSize
+ if chunk.ChunkSize > 0 {
+ size = chunk.ChunkSize
}
- }
+ remainingSize = remainingSize - size
- missingChunksSize += r.Size
- if t == tar.TypeReg {
rawChunk := ImageSourceChunk{
- Offset: uint64(r.Offset),
- Length: uint64(r.EndOffset - r.Offset),
+ Offset: uint64(chunk.Offset),
+ Length: uint64(compressedSize),
}
-
- file := missingFile{
- File: &mergedEntries[i],
+ file := missingFileChunk{
+ File: &mergedEntries[res.index],
+ CompressedSize: compressedSize,
+ UncompressedSize: size,
}
-
- missingChunks = append(missingChunks, missingChunk{
- RawChunk: rawChunk,
- Files: []missingFile{
+ mp := missingPart{
+ SourceChunk: &rawChunk,
+ Chunks: []missingFileChunk{
file,
},
- })
+ }
+
+ switch chunk.ChunkType {
+ case internal.ChunkTypeData:
+ root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
+ if err != nil {
+ return output, err
+ }
+ if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) {
+ missingPartsSize -= size
+ mp.OriginFile = &originFile{
+ Root: root,
+ Path: path,
+ Offset: offset,
+ }
+ }
+ case internal.ChunkTypeZeros:
+ missingPartsSize -= size
+ mp.Hole = true
+ // Mark all chunks belonging to the missing part as holes
+ for i := range mp.Chunks {
+ mp.Chunks[i].Hole = true
+ }
+ }
+ missingParts = append(missingParts, mp)
}
}
// There are some missing files. Prepare a multirange request for the missing chunks.
- if len(missingChunks) > 0 {
- missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
- if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil {
+ if len(missingParts) > 0 {
+ missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
+ if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil {
return output, err
}
}
@@ -1351,31 +1655,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
if totalChunksSize > 0 {
- logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize))
+ logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
return output, nil
}
+func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+ // only the estargz format has metadata files that need to be skipped.
+ if fileType != fileTypeEstargz {
+ return false
+ }
+ switch e.Name {
+ // ignore the metadata files for the estargz format.
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+ return true
+ }
+ return false
+}
+
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
- var mergedEntries []internal.FileMetadata
- var prevEntry *internal.FileMetadata
- for _, entry := range entries {
- e := entry
+ countNextChunks := func(start int) int {
+ count := 0
+ for _, e := range entries[start:] {
+ if e.Type != TypeChunk {
+ return count
+ }
+ count++
+ }
+ return count
+ }
- // ignore the metadata files for the estargz format.
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) {
+ size := 0
+ for _, entry := range entries {
+ if mustSkipFile(fileType, entry) {
continue
}
+ if entry.Type != TypeChunk {
+ size++
+ }
+ }
+ mergedEntries := make([]internal.FileMetadata, size)
+ m := 0
+ for i := 0; i < len(entries); i++ {
+ e := entries[i]
+ if mustSkipFile(fileType, e) {
+ continue
+ }
if e.Type == TypeChunk {
- if prevEntry == nil || prevEntry.Type != TypeReg {
- return nil, errors.New("chunk type without a regular file")
+ return nil, fmt.Errorf("chunk type without a regular file")
+ }
+
+ if e.Type == TypeReg {
+ nChunks := countNextChunks(i + 1)
+
+ e.Chunks = make([]*internal.FileMetadata, nChunks+1)
+ for j := 0; j <= nChunks; j++ {
+ e.Chunks[j] = &entries[i+j]
+ e.EndOffset = entries[i+j].EndOffset
}
- prevEntry.EndOffset = e.EndOffset
- continue
+ i += nChunks
}
- mergedEntries = append(mergedEntries, e)
- prevEntry = &e
+ mergedEntries[m] = e
+ m++
}
// stargz/estargz doesn't store EndOffset so let's calculate it here
lastOffset := c.tocOffset
@@ -1386,6 +1728,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
if mergedEntries[i].Offset != 0 {
lastOffset = mergedEntries[i].Offset
}
+
+ lastChunkOffset := mergedEntries[i].EndOffset
+ for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
+ mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
+ mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
+ lastChunkOffset = mergedEntries[i].Chunks[j].Offset
+ }
}
return mergedEntries, nil
}
+
+// validateChunkChecksum checks whether the file content at $root/$path[offset:offset+chunk.ChunkSize] has the
+// same digest as chunk.ChunkDigest
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+ parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+ if err != nil {
+ return false
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return false
+ }
+ defer fd.Close()
+
+ if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+ return false
+ }
+
+ r := io.LimitReader(fd, chunk.ChunkSize)
+ digester := digest.Canonical.Digester()
+
+ if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+ return false
+ }
+
+ digest, err := digest.Parse(chunk.ChunkDigest)
+ if err != nil {
+ return false
+ }
+
+ return digester.Digest() == digest
+}
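
ApplyDiff now fans the per-file deduplication lookups out to copyGoRoutines workers over a channel and stores every result at its job index, so the output order stays deterministic while execution is parallel. The same pattern in isolation, as a hedged sketch (parallelMap is a hypothetical helper; an import of sync is assumed):

    func parallelMap(workers int, items []string, do func(string) error) []error {
        results := make([]error, len(items))
        jobs := make(chan int)
        var wg sync.WaitGroup
        for w := 0; w < workers; w++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for i := range jobs {
                    // each worker writes to a distinct index, so no locking is needed
                    results[i] = do(items[i])
                }
            }()
        }
        for i := range items {
            jobs <- i
        }
        close(jobs)
        wg.Wait()
        return results
    }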
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 83e797599..0abe886eb 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -82,7 +82,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(uidMap) == 1 && uidMap[0].Size == 1 {
uid = uidMap[0].HostID
} else {
- uid, err = toHost(0, uidMap)
+ uid, err = RawToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
@@ -90,7 +90,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(gidMap) == 1 && gidMap[0].Size == 1 {
gid = gidMap[0].HostID
} else {
- gid, err = toHost(0, gidMap)
+ gid, err = RawToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
@@ -98,10 +98,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return uid, gid, nil
}
-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
@@ -114,10 +118,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
@@ -187,22 +195,22 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
var err error
var target IDPair
- target.UID, err = toHost(pair.UID, i.uids)
+ target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
return target, err
}
- target.GID, err = toHost(pair.GID, i.gids)
+ target.GID, err = RawToHost(pair.GID, i.gids)
return target, err
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
+ uid, err := RawToContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
- gid, err := toContainer(pair.GID, i.gids)
+ gid, err := RawToContainer(pair.GID, i.gids)
return uid, gid, err
}
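
With the raw helpers exported, a caller that holds a single isolated ID can translate it directly; for a (uid,gid) pair the IDMappings methods remain the right entry point, as the doc comments note. A usage sketch with an illustrative 64k mapping:

    m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

    hostUID, err := idtools.RawToHost(1000, m) // -> 101000
    if err != nil {
        return err
    }
    ctrUID, err := idtools.RawToContainer(hostUID, m) // -> 1000
    if err != nil {
        return err
    }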
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index e444a1bcc..6e6e3b22b 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -12,10 +12,14 @@ import (
#cgo LDFLAGS: -l subid
#include <shadow/subid.h>
#include <stdlib.h>
+#include <stdio.h>
const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
struct subid_range get_range(struct subid_range *ranges, int i)
{
- return ranges[i];
+ shadow_logfd = stderr;
+ return ranges[i];
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index bf1cd4f38..8ba04ab10 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -647,17 +647,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
storesLock.Lock()
defer storesLock.Unlock()
+ // return only if BOTH the run root and the graph root match; otherwise our run root can be overridden if the graph root is found first
for _, s := range stores {
- if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+ if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
- if options.GraphRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
- }
- if options.RunRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+ // if passed a run root or a graph root alone, default the other; error out only if we have neither.
+ switch {
+ case options.RunRoot == "" && options.GraphRoot == "":
+ return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified")
+ case options.GraphRoot == "":
+ options.GraphRoot = types.Options().GraphRoot
+ case options.RunRoot == "":
+ options.RunRoot = types.Options().RunRoot
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -2497,23 +2501,29 @@ func (s *store) DeleteContainer(id string) error {
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- var err error
- for attempts := 0; attempts < 50; attempts++ {
- err = os.RemoveAll(gcpath)
- if err == nil || !system.IsEBUSY(err) {
- break
- }
- time.Sleep(time.Millisecond * 100)
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(gcpath)
+ if err == nil {
+ errChan <- nil
+ return
}
- errChan <- err
- wg.Done()
+ // and if that fails, fall back to the more complicated cleanup
+ errChan <- system.EnsureRemoveAll(gcpath)
}()
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- errChan <- os.RemoveAll(rcpath)
- wg.Done()
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(rcpath)
+ if err == nil {
+ errChan <- nil
+ return
+ }
+ // and if that fails, fall back to the more complicated cleanup
+ errChan <- system.EnsureRemoveAll(rcpath)
}()
go func() {
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 7586cd5ae..20d041f79 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -17,7 +17,7 @@ import (
)
// TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
@@ -306,7 +306,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
- config := new(tomlConfig)
+ config := new(TomlConfig)
meta, err := toml.DecodeFile(configFile, &config)
if err == nil {
@@ -424,3 +424,38 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
func Options() StoreOptions {
return defaultStoreOptions
}
+
+// Save overwrites the TomlConfig in storage.conf with the given conf.
+func Save(conf TomlConfig, rootless bool) error {
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return err
+ }
+ if err = os.Remove(configFile); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // recreate the file for writing; os.Create truncates it if it still exists
+ f, err := os.Create(configFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf TOML in order to overwrite it.
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+ config := new(TomlConfig)
+
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = toml.DecodeFile(configFile, &config)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
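
StorageConfig and Save together form a read-modify-write cycle over storage.conf. A hedged usage sketch (setDriver is hypothetical; the rootless flag and driver value are illustrative):

    func setDriver() error {
        conf, err := types.StorageConfig(true) // rootless = true
        if err != nil {
            return err
        }
        conf.Storage.Driver = "overlay" // illustrative value
        return types.Save(*conf, true)
    }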
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index c9014ce1d..0af08e65e 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,6 +3,7 @@
before:
hooks:
- ./gen.sh
+ - go install mvdan.cc/garble@latest
builds:
-
@@ -31,6 +32,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2d"
binary: s2d
@@ -57,6 +59,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -84,6 +87,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
archives:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 3429879eb..e8ff994f8 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,13 @@ This package provides various compression algorithms.
# changelog
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+ * Add garble-obfuscated binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -432,6 +439,13 @@ For more information see my blog post on [Fast Linear Time Compression](http://b
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
# license
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 5283ac5a5..b27f5a93b 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -6,9 +6,13 @@
package flate
import (
+ "encoding/binary"
"fmt"
"io"
"math"
+ "math/bits"
+
+ comp "github.com/klauspost/compress"
)
const (
@@ -37,15 +41,17 @@ const (
maxMatchLength = 258 // The longest match for the compressor
minOffsetSize = 1 // The shortest offset that makes any sense
- // The maximum number of tokens we put into a single flat block, just too
- // stop things from getting too large.
- maxFlateBlockTokens = 1 << 14
+ // The maximum number of tokens we will encode at a time.
+ // Smaller sizes usually create less optimal blocks.
+ // Bigger sizes can make context switching slow.
+ // We use this for levels 7-9, so we make it big.
+ maxFlateBlockTokens = 1 << 15
maxStoreBlockSize = 65535
hashBits = 17 // After 17 performance degrades
hashSize = 1 << hashBits
hashMask = (1 << hashBits) - 1
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
- maxHashOffset = 1 << 24
+ maxHashOffset = 1 << 28
skipNever = math.MaxInt32
@@ -70,9 +76,9 @@ var levels = []compressionLevel{
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
- {8, 8, 24, 16, skipNever, 7},
- {10, 16, 24, 64, skipNever, 8},
- {32, 258, 258, 4096, skipNever, 9},
+ {6, 10, 12, 16, skipNever, 7},
+ {10, 24, 32, 64, skipNever, 8},
+ {32, 258, 258, 1024, skipNever, 9},
}
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
@@ -93,8 +99,9 @@ type advancedState struct {
hashOffset int
// input window: unprocessed data is window[index:windowEnd]
- index int
- hashMatch [maxMatchLength + minMatchLength]uint32
+ index int
+ estBitsPerByte int
+ hashMatch [maxMatchLength + minMatchLength]uint32
hash uint32
ii uint16 // position of last match, intended to overflow to reset.
@@ -170,7 +177,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
window = d.window[d.blockStart:index]
}
d.blockStart = index
- d.w.writeBlock(tok, eof, window)
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
return d.w.err
}
return nil
@@ -263,7 +271,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
@@ -279,36 +287,43 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
+ length = minMatchLength - 1
wEnd := win[pos+length]
wPos := win[pos:]
minIndex := pos - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+ offset = 0
+ // Base is a 4-byte match with an additional fixed cost.
+ // Matches must be better than this.
+ cGain := minMatchLength*bpb - 12
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
+ if n > length {
+ newGain := n*bpb - bits.Len32(uint32(pos-i))
+ if newGain > cGain {
+ length = n
+ offset = pos - i
+ cGain = newGain
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
}
- wEnd = win[pos+n]
}
}
- if i == minIndex {
+ if i <= minIndex {
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex || i < 0 {
+ if i < minIndex {
break
}
}
@@ -327,8 +342,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
- b = b[:4]
- return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}
// bulkHash4 will compute hashes using the same
@@ -337,11 +351,12 @@ func bulkHash4(b []byte, dst []uint32) {
if len(b) < 4 {
return
}
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ hb := binary.LittleEndian.Uint32(b)
+
dst[0] = hash4u(hb, hashBits)
end := len(b) - 4 + 1
for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
+ hb = (hb >> 8) | uint32(b[i+3])<<24
dst[i] = hash4u(hb, hashBits)
}
}
@@ -374,10 +389,15 @@ func (d *compressor) deflateLazy() {
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
+ s.estBitsPerByte = 8
+ if !d.sync {
+ s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
+ s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
+ }
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ s.hash = hash4(d.window[s.index:])
}
for {
@@ -410,7 +430,7 @@ func (d *compressor) deflateLazy() {
}
if s.index < s.maxInsertIndex {
// Update the hash
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ s.hash = hash4(d.window[s.index:])
ch := s.hashHead[s.hash&hashMask]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
@@ -426,12 +446,37 @@ func (d *compressor) deflateLazy() {
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
s.length = newLength
s.offset = newOffset
}
}
+
if prevLength >= minMatchLength && s.length <= prevLength {
+ // Check for better match at end...
+ //
+ // checkOff must be >=2 since we otherwise risk checking s.index
+ // Offset of 2 seems to yield best results.
+ const checkOff = 2
+ prevIndex := s.index - 1
+ if prevIndex+prevLength+checkOff < s.maxInsertIndex {
+ end := lookahead
+ if lookahead > maxMatchLength {
+ end = maxMatchLength
+ }
+ end += prevIndex
+ idx := prevIndex + prevLength - (4 - checkOff)
+ h := hash4(d.window[idx:])
+ ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff)
+ if ch2 > minIndex {
+ length := matchLen(d.window[prevIndex:end], d.window[ch2:])
+ // It seems like a pure length metric is best.
+ if length > prevLength {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+ }
+ }
+ }
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
@@ -479,6 +524,7 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ s.ii = 0
} else {
// Reset, if we got a match this run.
if s.length >= minMatchLength {
@@ -498,13 +544,12 @@ func (d *compressor) deflateLazy() {
// If we have a long run of no matches, skip additional bytes
// Resets when s.ii overflows after 64KB.
- if s.ii > 31 {
- n := int(s.ii >> 5)
+ if n := int(s.ii) - d.chain; n > 0 {
+ n = 1 + int(n>>6)
for j := 0; j < n; j++ {
if s.index >= d.windowEnd-1 {
break
}
-
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
@@ -512,6 +557,14 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ // Index...
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
s.index++
}
// Flush last byte
@@ -611,7 +664,9 @@ func (d *compressor) write(b []byte) (n int, err error) {
}
n = len(b)
for len(b) > 0 {
- d.step(d)
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
b = b[d.fill(d, b):]
if d.err != nil {
return 0, d.err
@@ -652,13 +707,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
level = 5
fallthrough
case level >= 1 && level <= 6:
- d.w.logNewTablePenalty = 8
+ d.w.logNewTablePenalty = 7
d.fast = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeFast
case 7 <= level && level <= 9:
- d.w.logNewTablePenalty = 10
+ d.w.logNewTablePenalty = 8
d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
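The reworked findMatch above stops scoring candidates by raw length and instead estimates bits saved: n literals at roughly `bpb` bits each, minus a rough offset cost of `bits.Len32(distance)`. A sketch of that heuristic (names here are illustrative, not the package API), using the exported `ShannonEntropyBits` that deflateLazy now calls to estimate bits per byte:

```go
package main

import (
	"fmt"
	"math/bits"

	comp "github.com/klauspost/compress"
)

// matchGain mirrors the newGain computation above: bits saved by
// replacing n literals (at an estimated bpb bits each) minus a rough
// cost for encoding the match distance.
func matchGain(n, dist, bpb int) int {
	return n*bpb - bits.Len32(uint32(dist))
}

func main() {
	window := []byte("the quick brown fox jumps over the lazy dog")

	// deflateLazy now estimates bits per byte from the window's
	// Shannon entropy before searching.
	bpb := 1 + comp.ShannonEntropyBits(window)/len(window)

	// cGain baseline: a minimum-length (4-byte) match with a fixed
	// extra cost of 12 bits, as in the code above.
	base := 4*bpb - 12

	for _, c := range []struct{ n, dist int }{{4, 16}, {4, 4096}, {8, 4096}} {
		g := matchGain(c.n, c.dist, bpb)
		fmt.Printf("len=%d dist=%-4d gain=%-3d beats-base=%v\n",
			c.n, c.dist, g, g > base)
	}
}
```

The effect is that short matches at long distances, which previously qualified on length alone, are now rejected when the offset bits would eat the savings.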
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index a746eb733..0b2e54972 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -213,11 +213,9 @@ func (e *fastGen) Reset() {
// matchLen returns the maximum length.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
- b = b[:len(a)]
var checked int
for len(a) >= 8 {
- b = b[:len(a)]
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 3ad5e9807..fb1701eec 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -155,37 +155,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) {
w.lastHuffMan = false
}
-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
- offsets, lits = true, true
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
a := t.offHist[:offsetCodeCount]
- b := w.offsetFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- offsets = false
- break
+ b := w.offsetEncoding.codes
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
a = t.extraHist[:literalCount-256]
- b = w.literalFreq[256:literalCount]
+ b = w.literalEncoding.codes[256:literalCount]
b = b[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
- if lits {
- a = t.litHist[:]
- b = w.literalFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
- }
+
+ a = t.litHist[:256]
+ b = w.literalEncoding.codes[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
- return
+ return true
}
func (w *huffmanBitWriter) flush() {
@@ -222,7 +218,7 @@ func (w *huffmanBitWriter) write(b []byte) {
}
func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
- w.bits |= uint64(b) << w.nbits
+ w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
w.writeOutBits()
@@ -423,7 +419,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
func (w *huffmanBitWriter) writeCode(c hcode) {
// The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << w.nbits
+ w.bits |= uint64(c.code) << (w.nbits & 63)
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@@ -566,7 +562,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
w.lastHeader = 0
}
numLiterals, numOffsets := w.indexTokens(tokens, false)
- w.generate(tokens)
+ w.generate()
var extraBits int
storedSize, storable := w.storedSize(input)
if storable {
@@ -595,7 +591,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
}
// Stored bytes?
- if storable && storedSize < size {
+ if storable && storedSize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
@@ -634,22 +630,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.lastHeader = 0
w.lastHuffMan = false
}
- if !sync {
- tokens.Fill()
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+ // However, this does not appear to benefit in most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
}
+
numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
var size int
+
// Check if we should reuse.
if w.lastHeader > 0 {
// Estimate size for using a new table.
// Use the previous header size as the best estimate.
newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += newSize >> w.logNewTablePenalty
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty
// The estimated size is calculated as an optimal table.
// We add a penalty to make it more realistic and re-use a bit more.
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
// Check if a new table is better.
if newSize < reuseSize {
@@ -660,35 +673,79 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
} else {
size = reuseSize
}
+
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
// Check if we get a reasonable size decrease.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if storable && ssize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
}
// We want a new block/table
if w.lastHeader == 0 {
- w.generate(tokens)
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
var numCodegens int
- size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
// Write Huffman table.
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHeader, _ = w.headerSize()
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
w.lastHuffMan = false
}
@@ -699,6 +756,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
+
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
@@ -733,7 +803,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
return
}
-func (w *huffmanBitWriter) generate(t *tokens) {
+func (w *huffmanBitWriter) generate() {
w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
@@ -768,7 +838,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if t < matchType {
//w.writeCode(lits[t.literal()])
c := lits[t.literal()]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -796,7 +866,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode&31]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -819,7 +889,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if extraLengthBits > 0 {
//w.writeBits(extraLength, extraLengthBits)
extraLength := int32(length - lengthBase[lengthCode&31])
- bits |= uint64(extraLength) << nbits
+ bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -846,7 +916,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := offs[offsetCode]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -867,7 +937,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
offsetComb := offsetCombined[offsetCode]
if offsetComb > 1<<16 {
//w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+ bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
nbits += uint16(offsetComb >> 16)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -996,10 +1066,41 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
encoding := w.literalEncoding.codes[:256]
// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += uint8(n)
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= uint64(a.code) << (nbits & 63)
+ bits |= uint64(b.code) << ((nbits + a.len) & 63)
+ c := encoding[input[2]]
+ nbits += b.len + a.len
+ bits |= uint64(c.code) << (nbits & 63)
+ nbits += c.len
+ input = input[3:]
+ }
+
+ // Remaining...
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if debugDeflate {
count += int(c.len)
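A recurring pattern throughout this file: every shift count is masked with `& 63` so the compiler emits a bare shift instruction (no overflow branch) and the hot writes stay inlinable, with whole bytes spilled once at least 48 bits are buffered. A self-contained sketch of that discipline (hypothetical helper, not the package API):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packCodes packs variable-length codes LSB-first. Shifts are masked
// with &63 so Go emits plain shift instructions, and six whole bytes
// are spilled whenever at least 48 bits are buffered, mirroring the
// writeTokens/writeBlockHuff pattern above.
func packCodes(codes []uint16, lens []uint8) []byte {
	var (
		bits  uint64
		nbits uint8
		out   []byte
		buf   [8]byte
	)
	for i, c := range codes {
		bits |= uint64(c) << (nbits & 63)
		nbits += lens[i]
		if nbits >= 48 {
			binary.LittleEndian.PutUint64(buf[:], bits)
			out = append(out, buf[:6]...)
			bits >>= 48
			nbits -= 48
		}
	}
	// Flush the remaining bits, rounded up to whole bytes.
	binary.LittleEndian.PutUint64(buf[:], bits)
	return append(out, buf[:(nbits+7)>>3]...)
}

func main() {
	out := packCodes([]uint16{0x3, 0x1, 0x7f}, []uint8{2, 1, 7})
	fmt.Printf("%08b\n", out) // 10 bits packed LSB-first into 2 bytes
}
```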
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index d1edb356c..d5f62f6a2 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -328,11 +328,17 @@ func (f *decompressor) nextBlock() {
switch typ {
case 0:
f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("predefined huffman block")
+ }
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
@@ -341,6 +347,9 @@ func (f *decompressor) nextBlock() {
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
default:
// 3 is reserved.
if debugDecode {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 9b7cc8e97..2a06bd1a7 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -20,7 +20,7 @@ type dEntrySingle struct {
// double-symbols decoding
type dEntryDouble struct {
- seq uint16
+ seq [4]byte
nBits uint8
len uint8
}
@@ -753,23 +753,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
@@ -780,23 +778,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
@@ -914,7 +910,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- shift := (8 - d.actualTableLog) & 7
+ shift := (56 + (8 - d.actualTableLog)) & 63
const tlSize = 1 << 8
single := d.dt.single[:tlSize]
@@ -935,79 +931,91 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
- v2 = single[br[stream2].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
}
off += 4
@@ -1073,7 +1081,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[uint8(br.value>>shift)].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= int(nBits)
@@ -1121,7 +1129,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- const shift = 0
+ const shift = 56
const tlSize = 1 << 8
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
@@ -1145,37 +1153,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
br[stream].fillFast()
br[stream2].fillFast()
- v := single[br[stream].peekByteFast()>>shift].entry
+ v := single[uint8(br[stream].value>>shift)].entry
+ v2 := single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
}
{
@@ -1184,37 +1196,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
br[stream].fillFast()
br[stream2].fillFast()
- v := single[br[stream].peekByteFast()>>shift].entry
+ v := single[uint8(br[stream].value>>shift)].entry
+ v2 := single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
}
off += 4
@@ -1280,7 +1296,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[br.peekByteFast()].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= int(nBits)
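The bulk of this rewrite inlines the peek/advance pair so the two interleaved streams' loads and table lookups can overlap. A sketch of the per-symbol step with hypothetical reader fields (`value` holds upcoming bits left-aligned, `bitsRead` counts consumed bits; a table entry packs the decoded symbol in the high byte and its code length in the low byte, so one load yields both):

```go
package main

import "fmt"

type bitStream struct {
	value    uint64
	bitsRead uint8
}

// decodeOne performs the inlined step used in the loops above.
func decodeOne(br *bitStream, table *[256]uint16) byte {
	v := table[uint8(br.value>>56)] // top 8 bits index the table
	br.bitsRead += uint8(v)         // low byte: bits consumed
	br.value <<= v & 63             // shift consumed bits out
	return byte(v >> 8)             // high byte: decoded symbol
}

func main() {
	// Degenerate 8-bit identity table: every byte is its own code.
	var table [256]uint16
	for i := range table {
		table[i] = uint16(i)<<8 | 8
	}
	br := &bitStream{value: uint64('H')<<56 | uint64('i')<<48}
	fmt.Printf("%c%c\n", decodeOne(br, &table), decodeOne(br, &table)) // Hi
}
```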
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 854458537..753d17df6 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -50,16 +50,23 @@ func (b *bitReader) getBits(n uint8) int {
if n == 0 /*|| b.bitsRead >= 64 */ {
return 0
}
- return b.getBitsFast(n)
+ return int(b.get32BitsFast(n))
}
-// getBitsFast requires that at least one bit is requested every time.
+// get32BitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
-func (b *bitReader) getBitsFast(n uint8) int {
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
const regMask = 64 - 1
v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
- return int(v)
+ return v
+}
+
+func (b *bitReader) get16BitsFast(n uint8) uint16 {
+ const regMask = 64 - 1
+ v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return v
}
// fillFast() will make sure at least 32 bits are available.
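For reference, the extraction both fast getters perform: with the stream kept left-aligned in a 64-bit word, the next n bits come out with two masked shifts and no branches (a sketch under the same caveat as the code above: at least one bit must be requested).

```go
package main

import "fmt"

type reader struct {
	value    uint64
	bitsRead uint8
}

// get16BitsFast mirrors the getter above: shift consumed bits off the
// top, then shift the wanted n bits down to the bottom.
func (b *reader) get16BitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}

func main() {
	r := reader{value: 0xABCD << 48} // 16 interesting bits at the top
	fmt.Printf("%#x %#x\n", r.get16BitsFast(4), r.get16BitsFast(12)) // 0xa 0xbcd
}
```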
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 303ae90f9..b36618285 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -38,7 +38,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.nBits += bits
}
-// addBits32NC will add up to 32 bits.
+// addBits32NC will add up to 31 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
@@ -46,6 +46,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
b.nBits += bits
}
+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
+ if bits <= 31 {
+ b.addBits32Clean(uint32(value), bits)
+ return
+ }
+ b.addBits32Clean(uint32(value), 32)
+ b.flush32()
+ b.addBits32Clean(uint32(value>>32), bits-32)
+}
+
+// addBits32Clean will add up to 32 bits.
+// It will not check if there is space for them.
+// The input must not contain more bits than specified.
+func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
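A sketch of how addBits64NC keeps its headroom contract, using a hypothetical minimal writer: up to 64 bits are written as two clean 32-bit halves with a flush between them, so callers only ever need 32 free bits in the container.

```go
package main

import "fmt"

type bw struct {
	container uint64
	nBits     uint8
	out       []byte
}

func (b *bw) addBits32Clean(v uint32, bits uint8) {
	b.container |= uint64(v) << (b.nBits & 63)
	b.nBits += bits
}

// flush32 spills 4 bytes once at least 32 bits are buffered.
func (b *bw) flush32() {
	if b.nBits < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.container), byte(b.container>>8),
		byte(b.container>>16), byte(b.container>>24))
	b.nBits -= 32
	b.container >>= 32
}

func (b *bw) addBits64NC(v uint64, bits uint8) {
	if bits <= 31 {
		b.addBits32Clean(uint32(v), bits)
		return
	}
	b.addBits32Clean(uint32(v), 32)
	b.flush32()
	b.addBits32Clean(uint32(v>>32), bits-32)
}

func main() {
	var w bw
	w.addBits64NC(0xDEADBEEFCAFE, 48)
	w.flush32()
	fmt.Printf("% x | %d bits pending\n", w.out, w.nBits) // fe ca ef be | 16 bits pending
}
```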
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 3df185ee4..12e8f6f0b 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -51,7 +51,7 @@ func (b *blockEnc) init() {
if cap(b.literals) < maxCompressedBlockSize {
b.literals = make([]byte, 0, maxCompressedBlockSize)
}
- const defSeqs = 200
+ const defSeqs = 2000
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
@@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int {
return 0
}
enc := fseEncoder{}
- hist := enc.Histogram()[:256]
+ hist := enc.Histogram()
maxSym := uint8(0)
for i, v := range data {
v = v & 63
@@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
}
seq--
- if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
- // No need to flush (common)
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is 8 for all.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // We checked that all can stay within 32 bits
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- } else {
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is below 8 for each.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // ml+ll = max 32 bits total
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.flush32()
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
+ // Store sequences in reverse...
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
}
ml.flush(mlEnc.actualTableLog)
of.flush(ofEnc.actualTableLog)
@@ -801,14 +802,13 @@ func (b *blockEnc) genCodes() {
// nothing to do
return
}
-
if len(b.sequences) > math.MaxUint16 {
panic("can only encode up to 64K sequences")
}
// No bounds checks after here:
- llH := b.coders.llEnc.Histogram()[:256]
- ofH := b.coders.ofEnc.Histogram()[:256]
- mlH := b.coders.mlEnc.Histogram()[:256]
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
for i := range llH {
llH[i] = 0
}
@@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() {
}
var llMax, ofMax, mlMax uint8
- for i, seq := range b.sequences {
+ for i := range b.sequences {
+ seq := &b.sequences[i]
v := llCode(seq.litLen)
seq.llCode = v
llH[v]++
@@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
}
}
- b.sequences[i] = seq
}
maxCount := func(a []uint32) int {
var max uint32
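The rewritten sequence loop above folds the offset, match-length, and literal-length extra bits into one 64-bit accumulator and emits them with a single addBits64NC, instead of three separate bit writes per sequence. A sketch of the accumulation (`bitMask32` mirrors the package's table of `(1<<n)-1` values):

```go
package main

import "fmt"

var bitMask32 [32]uint32

func init() {
	for i := range bitMask32 {
		bitMask32[i] = (1 << i) - 1
	}
}

func main() {
	var acc uint64
	var accN uint8

	// push appends nBits of value below the bits already accumulated,
	// as the loop above does with extraBits/extraBitsN.
	push := func(value uint32, nBits uint8) {
		acc = acc<<nBits | uint64(value&bitMask32[nBits&31])
		accN += nBits
	}

	push(0x1ff, 9) // offset extra bits
	push(0x5, 3)   // match-length extra bits
	push(0x2, 2)   // literal-length extra bits

	fmt.Printf("%d bits: %014b\n", accN, acc) // one 64-bit write instead of three
}
```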
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 295cd602a..15ae8ee80 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -108,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) {
e.blk = enc
}
-func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
if debugAsserts {
if s < 0 {
@@ -131,9 +126,24 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
+ a := src[s:]
+ b := src[t:]
+ b = b[:len(a)]
+ end := int32((len(a) >> 3) << 3)
+ for i := int32(0); i < end; i += 8 {
+ if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
+ return i + int32(bits.TrailingZeros64(diff)>>3)
+ }
+ }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
+ a = a[end:]
+ b = b[end:]
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i) + end
+ }
+ }
+ return int32(len(a)) + end
}
// Reset the encoding table.
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f2502629b..5f08a2830 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -6,8 +6,6 @@ package zstd
import (
"fmt"
- "math"
- "math/bits"
)
const (
@@ -136,20 +134,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
-
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
@@ -236,20 +221,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -286,20 +258,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -418,21 +377,7 @@ encodeLoop:
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- // length := 4 + e.matchlen(s+6, repIndex+4, src)
- // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
- var length int32
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -522,21 +467,7 @@ encodeLoop:
panic(fmt.Sprintf("t (%d) < 0 ", t))
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlenNoHist(s+4, t+4, src) + 4
- // l := int32(matchLen(src[s+4:], src[t+4:])) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -573,21 +504,7 @@ encodeLoop:
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
- // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -731,19 +648,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -831,20 +736,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -881,20 +773,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index e6d3d49b3..bb3d4fd6c 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -379,7 +379,7 @@ func (s decSymbol) final() (int, uint8) {
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+ lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index b4757ee3f..5442061b1 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -62,9 +62,8 @@ func (s symbolTransform) String() string {
// To indicate that you have populated the histogram call HistogramFinished
// with the value of the highest populated symbol, as well as the number of entries
// in the most populated entry. These are accepted at face value.
-// The returned slice will always be length 256.
-func (s *fseEncoder) Histogram() []uint32 {
- return s.count[:]
+func (s *fseEncoder) Histogram() *[256]uint32 {
+ return &s.count
}
// HistogramFinished can be called to indicate that the histogram has been populated.
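Returning `*[256]uint32` instead of a slice is a bounds-check trick: the compiler knows the array length is exactly 256, so indexing with any byte-derived value needs no check in the hot loop. A standalone sketch:

```go
package main

import "fmt"

type histogrammer struct {
	count [256]uint32
}

// Histogram exposes the counts as a fixed-size array pointer, so
// callers can index it with a uint8 without bounds checks.
func (h *histogrammer) Histogram() *[256]uint32 {
	return &h.count
}

func main() {
	var h histogrammer
	hist := h.Histogram()
	for _, b := range []byte("abracadabra") {
		hist[b]++ // b is a byte, so hist[b] is provably in range
	}
	fmt.Println(hist['a'], hist['b'], hist['r']) // 5 2 2
}
```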
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
new file mode 100644
index 000000000..662609589
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -0,0 +1,189 @@
+// +build gc,!purego
+
+#include "textflag.h"
+
+// Register allocation.
+#define digest R1
+#define h R2 // Return value.
+#define p R3 // Input pointer.
+#define len R4
+#define nblocks R5 // len / 32.
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc \
+
+// x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x \
+
+#define mergeRound(x) \
+ round0(x) \
+ EOR x, h \
+ MADD h, prime4, prime1, h \
+
+// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
+#define blocksLoop() \
+ LSR $5, len, nblocks \
+ PCALIGN $16 \
+loop: \
+ LDP.P 32(p), (x1, x2) \
+ round(v1, x1) \
+ LDP -16(p), (x3, x4) \
+ round(v2, x2) \
+ SUB $1, nblocks \
+ round(v3, x3) \
+ round(v4, x4) \
+ CBNZ nblocks, loop \
+
+
+// The primes are repeated here to ensure that they're stored
+// in a contiguous array, so we can load them with LDP.
+DATA primes<> +0(SB)/8, $11400714785074694791
+DATA primes<> +8(SB)/8, $14029467366897019727
+DATA primes<>+16(SB)/8, $1609587929392839161
+DATA primes<>+24(SB)/8, $9650029242287828579
+DATA primes<>+32(SB)/8, $2870177450012600261
+GLOBL primes<>(SB), NOPTR+RODATA, $40
+
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
+ LDP b_base+0(FP), (p, len)
+
+ LDP primes<> +0(SB), (prime1, prime2)
+ LDP primes<>+16(SB), (prime3, prime4)
+ MOVD primes<>+32(SB), prime5
+
+ CMP $32, len
+ CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
+ BLO afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blocksLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(v1)
+ mergeRound(v2)
+ mergeRound(v3)
+ mergeRound(v4)
+
+afterLoop:
+ ADD len, h
+
+ TBZ $4, len, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, len, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, len, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, len, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, len, end
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h
+ MUL prime1, h
+
+end:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+
+// func writeBlocks(d *Digest, b []byte) int
+//
+// Assumes len(b) >= 32.
+TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
+ LDP primes<>(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, len)
+
+ blocksLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, len
+ MOVD len, ret+32(FP)
+ RET
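For readers cross-checking the assembly: the round/round0 macros implement the standard xxhash64 round (MADD computes acc + x*prime2, ROR $64-31 is a left-rotate by 31, MUL finishes with prime1). A Go reference of the same primitive; the constants match the primes table above.

```go
package main

import (
	"fmt"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
)

// round mirrors the round(acc, x) macro:
// acc = rotl31(acc + x*prime2) * prime1.
func round(acc, x uint64) uint64 {
	acc += x * prime2
	acc = bits.RotateLeft64(acc, 31)
	return acc * prime1
}

func main() {
	fmt.Printf("%#x\n", round(prime1+prime2, 0xdeadbeef)) // v1's first update
}
```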
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 0ae847f75..9216e0a40 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -1,5 +1,8 @@
-//go:build !appengine && gc && !purego
-// +build !appengine,gc,!purego
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 1f52f296e..2deb1ca75 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -1,5 +1,5 @@
-//go:build !amd64 || appengine || !gc || purego
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 1dd39e63b..bc731e4cb 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -278,7 +278,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
- bits := br.getBitsFast(nBits)
+ bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -326,7 +326,7 @@ func (s *sequenceDecs) updateAlt(br *bitReader) {
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
- bits := br.getBitsFast(nBits)
+ bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ebe683486..714eeee00 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -11,7 +11,7 @@ github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/pkg/security
github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.9.1
+# github.com/Microsoft/hcsshim v0.9.2
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
github.com/Microsoft/hcsshim/internal/cow
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.46.1-0.20220117145719-da777f8b15b1
+# github.com/containers/common v0.46.1-0.20220119203335-0e7aca71d00a
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
@@ -230,7 +230,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5
+# github.com/containers/storage v1.38.0
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -455,7 +455,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.13.6
+# github.com/klauspost/compress v1.14.1
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse