Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containers/storage/CODE-OF-CONDUCT.md | 3
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 11
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 4
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 4
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go | 2
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go | 2156
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go | 101
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go | 25
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go | 15
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_linux.go | 15
-rw-r--r--  vendor/github.com/containers/storage/store.go | 7
-rw-r--r--  vendor/github.com/containers/storage/utils.go | 52
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 9
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level1.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level2.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level3.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level4.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level5.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level6.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/stateless.go | 67
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_go15.go | 29
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_posix.go | 7
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_windows.go | 7
-rw-r--r--  vendor/modules.txt | 6
27 files changed, 2429 insertions(+), 147 deletions(-)
diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..be0791620
--- /dev/null
+++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The Containers Storage Project Community Code of Conduct
+
+The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 1b69d6060..09937303b 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -54,19 +54,19 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.
containers-storage: $(sources) ## build using gc on the host
$(GO_BUILD) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
-layers_ffjson.go: layers.go
+layers_ffjson.go: $(FFJSON) layers.go
$(RM) $@
$(FFJSON) layers.go
-images_ffjson.go: images.go
+images_ffjson.go: $(FFJSON) images.go
$(RM) $@
$(FFJSON) images.go
-containers_ffjson.go: containers.go
+containers_ffjson.go: $(FFJSON) containers.go
$(RM) $@
$(FFJSON) containers.go
-pkg/archive/archive_ffjson.go: pkg/archive/archive.go
+pkg/archive/archive_ffjson.go: $(FFJSON) pkg/archive/archive.go
$(RM) $@
$(FFJSON) pkg/archive/archive.go
@@ -118,6 +118,9 @@ validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor u
install.tools:
make -C tests/tools
+$(FFJSON):
+ make -C tests/tools build/ffjson
+
install.docs: docs
make -C docs install
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 98e863cdf..15b989e39 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.15.8
+1.16.0
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index ba40f9c14..84dd86a20 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -7,10 +7,10 @@ require (
github.com/Microsoft/hcsshim v0.8.7
github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
github.com/docker/go-units v0.4.0
- github.com/klauspost/compress v1.9.8
+ github.com/klauspost/compress v1.10.0
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/pgzip v1.2.1
- github.com/mattn/go-shellwords v1.0.9
+ github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/runc v1.0.0-rc9
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index e2785594d..c2029949a 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -79,6 +79,8 @@ github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug2
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
+github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -93,6 +95,8 @@ github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWeP
github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 0dde97c18..e1954ad04 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
package storage
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
new file mode 100644
index 000000000..3a1095226
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -0,0 +1,2156 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: layers.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshal bytes to json - template
+func (j *DiffOptions) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Compression != nil {
+ buf.WriteString(`{"Compression":`)
+ fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0)
+ } else {
+ buf.WriteString(`{"Compression":null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtDiffOptionsbase = iota
+ ffjtDiffOptionsnosuchkey
+
+ ffjtDiffOptionsCompression
+)
+
+var ffjKeyDiffOptionsCompression = []byte("Compression")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *DiffOptions) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtDiffOptionsbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtDiffOptionsCompression:
+ goto handle_Compression
+
+ case ffjtDiffOptionsnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Compression:
+
+ /* handler: j.Compression type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.Compression = nil
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := archive.Compression(tval)
+ j.Compression = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *Layer) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Parent) != 0 {
+ buf.WriteString(`"parent":`)
+ fflib.WriteJsonString(buf, string(j.Parent))
+ buf.WriteByte(',')
+ }
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.MountLabel) != 0 {
+ buf.WriteString(`"mountlabel":`)
+ fflib.WriteJsonString(buf, string(j.MountLabel))
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.CompressedDigest) != 0 {
+ buf.WriteString(`"compressed-diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.CompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.CompressedSize != 0 {
+ buf.WriteString(`"compressed-size":`)
+ fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UncompressedDigest) != 0 {
+ buf.WriteString(`"diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.UncompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.UncompressedSize != 0 {
+ buf.WriteString(`"diff-size":`)
+ fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if j.CompressionType != 0 {
+ buf.WriteString(`"compression":`)
+ fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UIDs) != 0 {
+ buf.WriteString(`"uidset":`)
+ if j.UIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDs) != 0 {
+ buf.WriteString(`"gidset":`)
+ if j.GIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.UIDMap) != 0 {
+ buf.WriteString(`"uidmap":`)
+ if j.UIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDMap) != 0 {
+ buf.WriteString(`"gidmap":`)
+ if j.GIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtLayerbase = iota
+ ffjtLayernosuchkey
+
+ ffjtLayerID
+
+ ffjtLayerNames
+
+ ffjtLayerParent
+
+ ffjtLayerMetadata
+
+ ffjtLayerMountLabel
+
+ ffjtLayerCreated
+
+ ffjtLayerCompressedDigest
+
+ ffjtLayerCompressedSize
+
+ ffjtLayerUncompressedDigest
+
+ ffjtLayerUncompressedSize
+
+ ffjtLayerCompressionType
+
+ ffjtLayerUIDs
+
+ ffjtLayerGIDs
+
+ ffjtLayerFlags
+
+ ffjtLayerUIDMap
+
+ ffjtLayerGIDMap
+)
+
+var ffjKeyLayerID = []byte("id")
+
+var ffjKeyLayerNames = []byte("names")
+
+var ffjKeyLayerParent = []byte("parent")
+
+var ffjKeyLayerMetadata = []byte("metadata")
+
+var ffjKeyLayerMountLabel = []byte("mountlabel")
+
+var ffjKeyLayerCreated = []byte("created")
+
+var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest")
+
+var ffjKeyLayerCompressedSize = []byte("compressed-size")
+
+var ffjKeyLayerUncompressedDigest = []byte("diff-digest")
+
+var ffjKeyLayerUncompressedSize = []byte("diff-size")
+
+var ffjKeyLayerCompressionType = []byte("compression")
+
+var ffjKeyLayerUIDs = []byte("uidset")
+
+var ffjKeyLayerGIDs = []byte("gidset")
+
+var ffjKeyLayerFlags = []byte("flags")
+
+var ffjKeyLayerUIDMap = []byte("uidmap")
+
+var ffjKeyLayerGIDMap = []byte("gidmap")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *Layer) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtLayerbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'd':
+
+ if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'g':
+
+ if bytes.Equal(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtLayerID:
+ goto handle_ID
+
+ case ffjtLayerNames:
+ goto handle_Names
+
+ case ffjtLayerParent:
+ goto handle_Parent
+
+ case ffjtLayerMetadata:
+ goto handle_Metadata
+
+ case ffjtLayerMountLabel:
+ goto handle_MountLabel
+
+ case ffjtLayerCreated:
+ goto handle_Created
+
+ case ffjtLayerCompressedDigest:
+ goto handle_CompressedDigest
+
+ case ffjtLayerCompressedSize:
+ goto handle_CompressedSize
+
+ case ffjtLayerUncompressedDigest:
+ goto handle_UncompressedDigest
+
+ case ffjtLayerUncompressedSize:
+ goto handle_UncompressedSize
+
+ case ffjtLayerCompressionType:
+ goto handle_CompressionType
+
+ case ffjtLayerUIDs:
+ goto handle_UIDs
+
+ case ffjtLayerGIDs:
+ goto handle_GIDs
+
+ case ffjtLayerFlags:
+ goto handle_Flags
+
+ case ffjtLayerUIDMap:
+ goto handle_UIDMap
+
+ case ffjtLayerGIDMap:
+ goto handle_GIDMap
+
+ case ffjtLayernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Parent:
+
+ /* handler: j.Parent type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Parent = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountLabel:
+
+ /* handler: j.MountLabel type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountLabel = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedDigest:
+
+ /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.CompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedSize:
+
+ /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedDigest:
+
+ /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.UncompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedSize:
+
+ /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.UncompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressionType:
+
+ /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressionType = archive.Compression(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDs:
+
+ /* handler: j.UIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDs = nil
+ } else {
+
+ j.UIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJUIDs = uint32(tval)
+
+ }
+ }
+
+ j.UIDs = append(j.UIDs, tmpJUIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDs:
+
+ /* handler: j.GIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDs = nil
+ } else {
+
+ j.GIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJGIDs = uint32(tval)
+
+ }
+ }
+
+ j.GIDs = append(j.GIDs, tmpJGIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDMap:
+
+ /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDMap = nil
+ } else {
+
+ j.UIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJUIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.UIDMap = append(j.UIDMap, tmpJUIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDMap:
+
+ /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDMap = nil
+ } else {
+
+ j.GIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJGIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.GIDMap = append(j.GIDMap, tmpJGIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerMountPoint) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"path":`)
+ fflib.WriteJsonString(buf, string(j.MountPoint))
+ buf.WriteString(`,"count":`)
+ fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtlayerMountPointbase = iota
+ ffjtlayerMountPointnosuchkey
+
+ ffjtlayerMountPointID
+
+ ffjtlayerMountPointMountPoint
+
+ ffjtlayerMountPointMountCount
+)
+
+var ffjKeylayerMountPointID = []byte("id")
+
+var ffjKeylayerMountPointMountPoint = []byte("path")
+
+var ffjKeylayerMountPointMountCount = []byte("count")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *layerMountPoint) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerMountPointbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerMountPointID:
+ goto handle_ID
+
+ case ffjtlayerMountPointMountPoint:
+ goto handle_MountPoint
+
+ case ffjtlayerMountPointMountCount:
+ goto handle_MountCount
+
+ case ffjtlayerMountPointnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountPoint:
+
+ /* handler: j.MountPoint type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountPoint = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountCount:
+
+ /* handler: j.MountCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.MountCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtlayerStorebase = iota
+ ffjtlayerStorenosuchkey
+)
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *layerStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *simpleGetCloser) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtsimpleGetCloserbase = iota
+ ffjtsimpleGetClosernosuchkey
+)
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *simpleGetCloser) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtsimpleGetCloserbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtsimpleGetClosernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
index c001fbecb..d28ba9d69 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
@@ -1,23 +1,96 @@
-// +build linux
-
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
+ "errors"
"os"
-
- "github.com/containers/storage/pkg/idtools"
+ "path/filepath"
+ "strings"
)
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
- uid := os.Getuid()
- usr, err := idtools.LookupUID(uid)
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+ return xdgRuntimeDir, nil
+ }
+ return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickyRuntimeDir returns slice of sticked files.
+// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ runtimeDir, err := GetRuntimeDir()
if err != nil {
- return "", err
+ // ignore error if runtimeDir is empty
+ return nil, nil
+ }
+ runtimeDir, err = filepath.Abs(runtimeDir)
+ if err != nil {
+ return nil, err
+ }
+ var sticked []string
+ for _, f := range files {
+ f, err = filepath.Abs(f)
+ if err != nil {
+ return sticked, err
+ }
+ if strings.HasPrefix(f, runtimeDir+"/") {
+ if err = stick(f); err != nil {
+ return sticked, err
+ }
+ sticked = append(sticked, f)
+ }
+ }
+ return sticked, nil
+}
+
+func stick(f string) error {
+ st, err := os.Stat(f)
+ if err != nil {
+ return err
+ }
+ m := st.Mode()
+ m |= os.ModeSticky
+ return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
- return usr.Home, nil
+ return filepath.Join(home, ".config"), nil
}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index 6b96b856f..f7bcfb878 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -2,12 +2,29 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"errors"
)
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
- return "", errors.New("homedir.GetStatic() is not supported on this system")
+// GetRuntimeDir is unsupported on non-linux system.
+func GetRuntimeDir() (string, error) {
+ return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-linux system.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
+
+// GetDataHome is unsupported on non-linux system.
+func GetDataHome() (string, error) {
+ return "", errors.New("homedir.GetDataHome() is not supported on this system")
+}
+
+// GetConfigHome is unsupported on non-linux system.
+func GetConfigHome() (string, error) {
+ return "", errors.New("homedir.GetConfigHome() is not supported on this system")
}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index f2a20ea8f..dcadb7e8d 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -2,10 +2,12 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package was originally copied from github.com/docker/docker.
+
import (
"os"
-
- "github.com/opencontainers/runc/libcontainer/user"
+ "os/user"
)
// Key returns the env var name for the user's home dir based on
@@ -17,11 +19,16 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If NSS lookups are needed, do not disable cgo or set the osusergo tag.
func Get() string {
home := os.Getenv(Key())
if home == "" {
- if u, err := user.CurrentUser(); err == nil {
- return u.Home
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
}
return home
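
With the switch to os/user above, Get only does a user-database lookup when the variable named by Key() is unset, so a plain environment override still wins and no cgo is required on that common path. Roughly:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// Key() is "HOME" on unix; Get() prefers that variable and only falls
	// back to user.Current().HomeDir when it is empty.
	fmt.Printf("%s=%q\n", homedir.Key(), homedir.Get())
}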
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
index fafdb2bbf..4f2615ed3 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -1,5 +1,8 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package was originally copied from github.com/docker/docker.
+
import (
"os"
)
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 1ae728a61..372bee732 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -5,9 +5,6 @@ package reexec
import (
"context"
"os/exec"
- "syscall"
-
- "golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
@@ -16,28 +13,20 @@ func Self() string {
return "/proc/self/exe"
}
-// Command returns *exec.Cmd which has Path as current binary. Also it setting
-// SysProcAttr.Pdeathsig to SIGTERM.
+// Command returns an *exec.Cmd whose Path is the current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
cmd := exec.Command(Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
-// CommandContext returns *exec.Cmd which has Path as current binary, and also
-// sets SysProcAttr.Pdeathsig to SIGTERM.
+// CommandContext returns an *exec.Cmd whose Path is the current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
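
Dropping Pdeathsig only changes the SysProcAttr that Command and CommandContext prepare; the register/dispatch flow of the reexec package is unchanged, and callers that still want a parent-death signal can set SysProcAttr themselves on the returned *exec.Cmd. A sketch of the usual pattern (the "child" name is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register the function to run when this binary is re-executed as "child".
	reexec.Register("child", func() {
		fmt.Println("running re-executed child")
		os.Exit(0)
	})
}

func main() {
	// Init dispatches to a registered function when os.Args[0] matches one.
	if reexec.Init() {
		return
	}
	// Command re-executes /proc/self/exe with args[0] set to "child".
	cmd := reexec.Command("child")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}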
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index b84be4424..d978c476d 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/storage/pkg/archive"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
@@ -3275,9 +3276,9 @@ const defaultConfigFile = "/etc/containers/storage.conf"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
- home, err := homeDir()
- if err != nil {
- return "", errors.Wrapf(err, "cannot determine users homedir")
+ home := homedir.Get()
+ if home == "" {
+ return "", errors.New("cannot determine user's homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
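
After this change a rootless DefaultConfigFile resolves the home directory through homedir.Get() (environment first, then os/user) instead of the removed homeDir() helper, and reports the failure directly rather than wrapping it. A typical call site, sketched:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	// Root gets /etc/containers/storage.conf; rootless users get
	// ~/.config/containers/storage.conf based on homedir.Get().
	path, err := storage.DefaultConfigFile(os.Geteuid() != 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("storage config:", path)
}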
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 28e0a8d6d..7e4b27d0f 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -4,12 +4,12 @@ import (
"fmt"
"os"
"os/exec"
- "os/user"
"path/filepath"
"strconv"
"strings"
"github.com/BurntSushi/toml"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -82,9 +82,8 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
func getRootlessRuntimeDir(rootlessUID int) (string, error) {
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
-
- if runtimeDir != "" {
+ runtimeDir, err := homedir.GetRuntimeDir()
+ if err == nil {
return runtimeDir, nil
}
tmpDir := fmt.Sprintf("/run/user/%d", rootlessUID)
@@ -98,8 +97,8 @@ func getRootlessRuntimeDir(rootlessUID int) (string, error) {
} else {
return tmpDir, nil
}
- home, err := homeDir()
- if err != nil {
+ home := homedir.Get()
+ if home == "" {
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
}
resolvedHome, err := filepath.EvalSymlinks(home)
@@ -117,20 +116,23 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
return "", "", err
}
- dataDir := os.Getenv("XDG_DATA_HOME")
- if dataDir == "" {
- home, err := homeDir()
- if err != nil {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
- }
- // runc doesn't like symlinks in the rootfs path, and at least
- // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- dataDir = filepath.Join(resolvedHome, ".local", "share")
+ dataDir, err := homedir.GetDataHome()
+ if err == nil {
+ return dataDir, rootlessRuntime, nil
+ }
+
+ home := homedir.Get()
+ if home == "" {
+ return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "cannot resolve %s", home)
}
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+
return dataDir, rootlessRuntime, nil
}
@@ -246,15 +248,3 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
}
return storageOpts, nil
}
-
-func homeDir() (string, error) {
- home := os.Getenv("HOME")
- if home == "" {
- usr, err := user.Current()
- if err != nil {
- return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- home = usr.HomeDir
- }
- return home, nil
-}
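
The rootless lookups in utils.go now also route through the homedir package: XDG_RUNTIME_DIR and XDG_DATA_HOME are consulted first, then $HOME with symlinks resolved (runc dislikes symlinked rootfs paths). Most callers only see the result through DefaultStoreOptions; a sketch, assuming the usual exported StoreOptions fields:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	uid := os.Geteuid()
	opts, err := storage.DefaultStoreOptions(uid != 0, uid)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// For rootless users GraphRoot now derives from homedir.GetDataHome()
	// (XDG_DATA_HOME or ~/.local/share) and RunRoot from the runtime dir.
	fmt.Println("graph root:", opts.GraphRoot)
	fmt.Println("run root:  ", opts.RunRoot)
}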
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index b0a470f92..3d2fdcd77 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -42,10 +42,10 @@ const (
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
- bTableBits = 18 // Bits used in the big tables
- bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxMatchOffset * 10 // Size to preallocate for history.
- bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
+ bTableBits = 18 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 20 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
const (
@@ -210,16 +210,14 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
// Reset the encoding table.
func (e *fastGen) Reset() {
- if cap(e.hist) < int(maxMatchOffset*8) {
- l := maxMatchOffset * 8
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
}
- // We offset current position so everything will be out of reach
- e.cur += maxMatchOffset + int32(len(e.hist))
e.hist = e.hist[:0]
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 9feea87a3..56ee6dc8b 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -177,6 +177,11 @@ func (w *huffmanBitWriter) flush() {
w.nbits = 0
return
}
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
n := w.nbytes
for w.nbits != 0 {
w.bytes[n] = byte(w.bits)
@@ -594,8 +599,8 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
tokens.AddEOB()
}
- // We cannot reuse pure huffman table.
- if w.lastHuffMan && w.lastHeader > 0 {
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
// We will not try to reuse.
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 20de8f11f..102fc74c7 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -14,6 +16,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 7c824431e..dc6b1d314 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -16,6 +18,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index 4153d24c9..1a3ff9b6b 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastEncL3
type fastEncL3 struct {
fastGen
@@ -13,6 +15,10 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
index c689ac771..f3ecc9c4d 100644
--- a/vendor/github.com/klauspost/compress/flate/level4.go
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -13,7 +13,9 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
-
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 14a235612..4e3916825 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -13,6 +13,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index cad0c7df7..00a311977 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -13,6 +13,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index a47051197..53e899124 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -8,6 +8,8 @@ import (
const (
maxStatelessBlock = math.MaxInt16
+	// The dictionary is carved out of the maxStatelessBlock budget, so limit its size.
+ maxStatelessDict = 8 << 10
slTableBits = 13
slTableSize = 1 << slTableBits
@@ -25,11 +27,11 @@ func (s *statelessWriter) Close() error {
}
s.closed = true
// Emit EOF block
- return StatelessDeflate(s.dst, nil, true)
+ return StatelessDeflate(s.dst, nil, true, nil)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false)
+ err = StatelessDeflate(s.dst, p, false, nil)
if err != nil {
return 0, err
}
@@ -59,7 +61,10 @@ var bitWriterPool = sync.Pool{
// StatelessDeflate allows to compress directly to a Writer without retaining state.
// When returning everything will be flushed.
-func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
@@ -76,35 +81,53 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
return bw.err
}
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
for len(in) > 0 {
todo := in
- if len(todo) > maxStatelessBlock {
- todo = todo[:maxStatelessBlock]
+ if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
}
in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
// Compress
- statelessEnc(&dst, todo)
+ statelessEnc(&dst, todo, int16(len(dict)))
isEof := eof && len(in) == 0
if dst.n == 0 {
- bw.writeStoredHeader(len(todo), isEof)
+ bw.writeStoredHeader(len(uncompressed), isEof)
if bw.err != nil {
return bw.err
}
- bw.writeBytes(todo)
- } else if int(dst.n) > len(todo)-len(todo)>>4 {
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
// If we removed less than 1/16th, huffman compress the block.
- bw.writeBlockHuff(isEof, todo, false)
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
- bw.writeBlockDynamic(&dst, isEof, todo, false)
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ dict = todo[len(todo)-maxStatelessDict:]
+ dst.Reset()
}
if bw.err != nil {
return bw.err
}
- dst.Reset()
}
if !eof {
- // Align.
+ // Align, only a stored block can do that.
bw.writeStoredHeader(0, false)
}
bw.flush()
@@ -130,7 +153,7 @@ func load6416(b []byte, i int16) uint64 {
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
-func statelessEnc(dst *tokens, src []byte) {
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
@@ -144,15 +167,23 @@ func statelessEnc(dst *tokens, src []byte) {
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
- dst.n = uint16(len(src))
+ dst.n = 0
return
}
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
- s := int16(1)
- nextEmit := int16(0)
+ s := startAt + 1
+ nextEmit := startAt
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
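
The new dict parameter threads an up-to-8KB preset dictionary through StatelessDeflate and statelessEnc; the output remains a plain raw-deflate stream, so it decodes with a dictionary-aware reader such as flate.NewReaderDict. A round-trip sketch (the dictionary and payload contents are arbitrary):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	// The dictionary is presumed to precede the data; longer dictionaries
	// are truncated to the last 8KB, and nil is also accepted.
	dict := []byte("a shared prefix known to both compressor and decompressor")
	payload := bytes.Repeat([]byte("a shared prefix plus some payload "), 64)

	var compressed bytes.Buffer
	// eof=true writes the final block and flushes in a single call.
	if err := flate.StatelessDeflate(&compressed, payload, true, dict); err != nil {
		fmt.Println("compress:", err)
		return
	}

	// Decode with the same dictionary to verify the round trip.
	r := flate.NewReaderDict(&compressed, dict)
	defer r.Close()
	var out bytes.Buffer
	if _, err := out.ReadFrom(r); err != nil {
		fmt.Println("decompress:", err)
		return
	}
	fmt.Println("round trip ok:", bytes.Equal(out.Bytes(), payload))
}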
diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go
deleted file mode 100644
index ddcbf229e..000000000
--- a/vendor/github.com/mattn/go-shellwords/util_go15.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !go1.6
-
-package shellwords
-
-import (
- "os"
- "os/exec"
- "runtime"
- "strings"
-)
-
-func shellRun(line, dir string) (string, error) {
- var b []byte
- var err error
- var cmd *exec.Cmd
- if runtime.GOOS == "windows" {
- cmd = exec.Command(os.Getenv("COMSPEC"), "/c", line)
- } else {
- cmd = exec.Command(os.Getenv("SHELL"), "-c", line)
- }
- if dir != "" {
- cmd.Dir = dir
- }
- b, err = cmd.Output()
- if err != nil {
- return "", err
- }
- return strings.TrimSpace(string(b)), nil
-}
diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go
index 3aef2c4d7..988fc9ed2 100644
--- a/vendor/github.com/mattn/go-shellwords/util_posix.go
+++ b/vendor/github.com/mattn/go-shellwords/util_posix.go
@@ -1,4 +1,4 @@
-// +build !windows,go1.6
+// +build !windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("SHELL")
+ var shell string
+ if shell = os.Getenv("SHELL"); shell == "" {
+ shell = "/bin/sh"
+ }
cmd := exec.Command(shell, "-c", line)
if dir != "" {
cmd.Dir = dir
diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go
index cda685091..20546737c 100644
--- a/vendor/github.com/mattn/go-shellwords/util_windows.go
+++ b/vendor/github.com/mattn/go-shellwords/util_windows.go
@@ -1,4 +1,4 @@
-// +build windows,go1.6
+// +build windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("COMSPEC")
+ var shell string
+ if shell = os.Getenv("COMSPEC"); shell == "" {
+ shell = "cmd"
+ }
cmd := exec.Command(shell, "/c", line)
if dir != "" {
cmd.Dir = dir
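
The $SHELL and %COMSPEC% fallbacks above only matter once shellRun is reached, i.e. when a Parser has backtick/$() expansion enabled; parsing without expansion never shells out. A hedged usage sketch:

package main

import (
	"fmt"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	// With ParseBacktick enabled, `...` and $(...) are evaluated via shellRun,
	// which now falls back to /bin/sh (or cmd on Windows) when SHELL/COMSPEC
	// is unset instead of failing with an empty command name.
	p.ParseBacktick = true
	args, err := p.Parse("echo `echo hello`")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(args) // [echo hello]
}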
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 814ea264e..32794b769 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -141,7 +141,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.15.8
+# github.com/containers/storage v1.16.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -313,7 +313,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.9.8
+# github.com/klauspost/compress v1.10.0
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -324,7 +324,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-shellwords v1.0.9
+# github.com/mattn/go-shellwords v1.0.10
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil