Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/BurntSushi/toml/.gitignore                  5
-rw-r--r--  vendor/github.com/BurntSushi/toml/.travis.yml                15
-rw-r--r--  vendor/github.com/BurntSushi/toml/COMPATIBLE                  4
-rw-r--r--  vendor/github.com/BurntSushi/toml/Makefile                   19
-rw-r--r--  vendor/github.com/BurntSushi/toml/README.md                  74
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go                 180
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_go116.go            18
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_meta.go             36
-rw-r--r--  vendor/github.com/BurntSushi/toml/deprecated.go              33
-rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go                     28
-rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go                 376
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types.go          19
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types_1.1.go      18
-rw-r--r--  vendor/github.com/BurntSushi/toml/go.mod                      3
-rw-r--r--  vendor/github.com/BurntSushi/toml/go.sum                      0
-rw-r--r--  vendor/github.com/BurntSushi/toml/internal/tz.go             36
-rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go                    524
-rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go                  585
-rw-r--r--  vendor/github.com/BurntSushi/toml/session.vim                 1
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_check.go              21
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml              2
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md           310
-rw-r--r--  vendor/github.com/containers/buildah/Makefile                 1
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt           90
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go          2
-rw-r--r--  vendor/github.com/containers/buildah/go.mod                   8
-rw-r--r--  vendor/github.com/containers/buildah/go.sum                  27
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go             2
-rw-r--r--  vendor/github.com/containers/common/libimage/pull.go          5
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf  306
-rw-r--r--  vendor/github.com/containers/common/version/version.go        2
31 files changed, 1825 insertions, 925 deletions
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
index 0cd380037..cd11be965 100644
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -1,5 +1,2 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0..000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
index 6efcfd0ce..f621b0119 100644
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -1,3 +1 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848d3..000000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 7c1b37ecc..64410cf75 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -6,27 +6,22 @@ packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
-Spec: https://github.com/toml-lang/toml
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+Documentation: https://godocs.io/github.com/BurntSushi/toml
-Documentation: https://godoc.org/github.com/BurntSushi/toml
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
-Installation:
+This library requires Go 1.13 or newer; install it with:
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
+ $ go get github.com/BurntSushi/toml
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
+It also comes with a TOML validator CLI tool:
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
+ $ go get github.com/BurntSushi/toml/cmd/tomlv
+ $ tomlv some-toml-file.toml
### Testing
@@ -36,8 +31,8 @@ and the encoder.
### Examples
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
+This package works similarly to how the Go standard library handles XML and
+JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
@@ -54,11 +49,11 @@ Which could be defined in Go as:
```go
type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
}
```
@@ -84,6 +79,9 @@ type TOML struct {
}
```
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
@@ -103,19 +101,19 @@ Which can be decoded with:
```go
type song struct {
- Name string
- Duration duration
+ Name string
+ Duration duration
}
type songs struct {
- Song []song
+ Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
+ log.Fatal(err)
}
for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
@@ -134,6 +132,9 @@ func (d *duration) UnmarshalText(text []byte) error {
}
```
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
### More complex usage
Here's an example of how to load the example from the official spec page:
@@ -180,23 +181,23 @@ And the corresponding Go types are:
```go
type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
}
type database struct {
- Server string
- Ports []int
+ Server string
+ Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
@@ -207,7 +208,7 @@ type server struct {
}
type clients struct {
- Data [][]interface{}
+ Data [][]interface{}
Hosts []string
}
```
@@ -216,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
+
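The README excerpt above decodes a flat list of keys and values into a struct. For reference, a complete runnable sketch of that flow might look like the following; the TOML blob and its values are illustrative, not taken from the package's test data.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// Config mirrors the struct from the README example above.
type Config struct {
	Age        int
	Cats       []string
	Pi         float64
	Perfection []int
	DOB        time.Time
}

func main() {
	// A small TOML blob shaped like the README's key/value example.
	blob := `
Age = 25
Cats = ["Cauchy", "Plato"]
Pi = 3.14
Perfection = [6, 28, 496, 8128]
DOB = 1987-07-05T05:45:00Z
`
	var conf Config
	if _, err := toml.Decode(blob, &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", conf)
}
```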
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index b0fd51d5b..d3d3b8397 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,19 +1,17 @@
package toml
import (
+ "encoding"
"fmt"
"io"
"io/ioutil"
"math"
+ "os"
"reflect"
"strings"
"time"
)
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
@@ -27,30 +25,21 @@ func Unmarshal(p []byte, v interface{}) error {
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
type Primitive struct {
undecoded interface{}
context Key
}
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
@@ -68,43 +57,51 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v))
}
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
+// Decoder decodes TOML data.
//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
-// TOML datetimes correspond to Go `time.Time` values.
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
//
// Key mapping
//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+ r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// Decode TOML data into the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
@@ -112,7 +109,15 @@ func Decode(data string, v interface{}) (MetaData, error) {
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
- p, err := parse(data)
+
+ // TODO: have the parser read from io.Reader? Or at the very least, make
+ // it read from []byte rather than string
+ data, err := ioutil.ReadAll(dec.r)
+ if err != nil {
+ return MetaData{}, err
+ }
+
+ p, err := parse(string(data))
if err != nil {
return MetaData{}, err
}
@@ -123,24 +128,22 @@ func Decode(data string, v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, indirect(rv))
}
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
+// Decode the TOML data into the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+ return NewDecoder(strings.NewReader(data)).Decode(v)
}
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+ fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
- return Decode(string(bs), v)
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
}
// unify performs a sort of type unification based on the structure of `rv`,
@@ -149,8 +152,8 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
// Special case. Look for a `Primitive` value.
+ // TODO: #76 would make this superfluous once implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
@@ -170,25 +173,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
}
}
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
// Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
}
- // BUG(burntsushi)
+ // TODO:
// The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+ // array. In particular, the unmarshaler should only be applied to primitive
+ // TOML values. But at this point, it will be applied to all kinds of values
+ // and produce an incorrect error whenever those values are hashes or arrays
+ // (including arrays of tables).
k := rv.Kind()
@@ -277,6 +272,12 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ if k := rv.Type().Key().Kind(); k != reflect.String {
+ return fmt.Errorf(
+ "toml: cannot decode to a map with non-string key type (%s in %q)",
+ k, rv.Type())
+ }
+
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
@@ -312,10 +313,8 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
}
return badtype("slice", data)
}
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
+ if l := datav.Len(); l != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -337,11 +336,10 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
+ l := data.Len()
+ for i := 0; i < l; i++ {
+ err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+ if err != nil {
return err
}
}
@@ -439,7 +437,7 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
@@ -482,7 +480,7 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv
}
}
@@ -498,12 +496,16 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true
}
return false
}
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
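The decode.go changes above route everything through a new streaming Decoder, with Decode and DecodeFile reduced to thin wrappers around it. A minimal sketch of the new entry point, using an assumed struct and an in-memory reader in place of a real file or network stream:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string
	Port int
}

func main() {
	// Any io.Reader works; strings.Reader stands in for a file or stream.
	r := strings.NewReader("host = \"example.com\"\nport = 8080\n")

	var s server
	md, err := toml.NewDecoder(r).Decode(&s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Host, s.Port, md.Keys())
}
```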
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
new file mode 100644
index 000000000..38aa75fdc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -0,0 +1,18 @@
+// +build go1.16
+
+package toml
+
+import (
+ "io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
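DecodeFS mirrors DecodeFile but reads through an fs.FS, which makes it usable with go:embed on Go 1.16+. A sketch of that combination; the embedded file name and config struct are assumptions for illustration.

```go
package main

import (
	"embed"
	"log"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS

type appConfig struct {
	Name string
	Port int
}

func main() {
	var cfg appConfig
	// DecodeFS reads config.toml from the embedded filesystem.
	if _, err := toml.DecodeFS(configFS, "config.toml", &cfg); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", cfg)
}
```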
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
index b9914a679..ad8899c6c 100644
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -2,9 +2,9 @@ package toml
import "strings"
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
@@ -13,10 +13,11 @@ type MetaData struct {
context Key // Used only during decoding.
}
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
//
-// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
@@ -41,8 +42,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified.
//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
@@ -51,13 +52,11 @@ func (md *MetaData) Type(key ...string) string {
return ""
}
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
type Key []string
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
+func (k Key) String() string { return strings.Join(k, ".") }
func (k Key) maybeQuotedAll() string {
var ss []string
@@ -68,6 +67,9 @@ func (k Key) maybeQuotedAll() string {
}
func (k Key) maybeQuoted(i int) string {
+ if k[i] == "" {
+ return `""`
+ }
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
@@ -76,7 +78,7 @@ func (k Key) maybeQuoted(i int) string {
}
}
if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ return `"` + quotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
@@ -89,10 +91,10 @@ func (k Key) add(piece string) Key {
}
// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
//
-// The list will have the same order as the keys appeared in the TOML data.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
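The MetaData documentation above covers IsDefined, Type, and Keys. A brief sketch of using them to inspect which keys a document actually defined; the TOML blob here is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := "[database]\nserver = \"192.168.1.1\"\nports = [8001, 8002]\n"

	var v map[string]interface{}
	md, err := toml.Decode(blob, &v)
	if err != nil {
		log.Fatal(err)
	}

	// Hierarchical lookup, as described in the IsDefined docs.
	fmt.Println(md.IsDefined("database", "server"))

	// TOML type name of a key, or "" if the key doesn't exist.
	fmt.Println(md.Type("database", "ports"))

	// Every key in document order.
	for _, k := range md.Keys() {
		fmt.Println(k.String())
	}
}
```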
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 000000000..db89eac1d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,33 @@
+package toml
+
+import (
+ "encoding"
+ "io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ return NewDecoder(r).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index b371f396e..099c4a77d 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -1,27 +1,13 @@
/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
+Package toml implements decoding and encoding of TOML files.
-The specification implemented: https://github.com/toml-lang/toml
+This package supports TOML v1.0.0, as listed on https://toml.io
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be
+used to print the type of each key.
*/
package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index d905c21a2..10d88ac63 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,48 +2,92 @@ package toml
import (
"bufio"
+ "encoding"
"errors"
"fmt"
"io"
+ "math"
"reflect"
"sort"
"strconv"
"strings"
"time"
+
+ "github.com/BurntSushi/toml/internal"
)
type tomlEncodeError struct{ error }
var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
+ errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+ errNonString = errors.New("toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
+ errNoKey = errors.New("toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
+ "\x00", `\u0000`,
+ "\x01", `\u0001`,
+ "\x02", `\u0002`,
+ "\x03", `\u0003`,
+ "\x04", `\u0004`,
+ "\x05", `\u0005`,
+ "\x06", `\u0006`,
+ "\x07", `\u0007`,
+ "\b", `\b`,
+ "\t", `\t`,
+ "\n", `\n`,
+ "\x0b", `\u000b`,
+ "\f", `\f`,
+ "\r", `\r`,
+ "\x0e", `\u000e`,
+ "\x0f", `\u000f`,
+ "\x10", `\u0010`,
+ "\x11", `\u0011`,
+ "\x12", `\u0012`,
+ "\x13", `\u0013`,
+ "\x14", `\u0014`,
+ "\x15", `\u0015`,
+ "\x16", `\u0016`,
+ "\x17", `\u0017`,
+ "\x18", `\u0018`,
+ "\x19", `\u0019`,
+ "\x1a", `\u001a`,
+ "\x1b", `\u001b`,
+ "\x1c", `\u001c`,
+ "\x1d", `\u001d`,
+ "\x1e", `\u001e`,
+ "\x1f", `\u001f`,
+ "\x7f", `\u007f`,
)
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
//
-// The indentation level can be controlled with the Indent field.
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
type Encoder struct {
- // A single indentation level. By default it is two spaces.
+ // The string to use for a single indentation level. The default is two
+ // spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
@@ -51,8 +95,7 @@ type Encoder struct {
w *bufio.Writer
}
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
+// NewEncoder creates a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
@@ -60,29 +103,10 @@ func NewEncoder(w io.Writer) *Encoder {
}
}
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@@ -110,9 +134,13 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
+ switch t := rv.Interface().(type) {
+ case time.Time, encoding.TextMarshaler:
+ enc.writeKeyValue(key, rv, false)
+ return
+ // TODO: #76 would make this superfluous once implemented.
+ case Primitive:
+ enc.encode(key, reflect.ValueOf(t.undecoded))
return
}
@@ -123,12 +151,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
- enc.keyEqElement(key, rv)
+ enc.writeKeyValue(key, rv, false)
}
case reflect.Interface:
if rv.IsNil() {
@@ -148,22 +176,32 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
- panic(e("unsupported type for key '%s': %s", key, k))
+ encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
}
}
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
+// eElement encodes any value that can be an array element.
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+ case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+ format := time.RFC3339Nano
+ switch v.Location() {
+ case internal.LocalDatetime:
+ format = "2006-01-02T15:04:05.999999999"
+ case internal.LocalDate:
+ format = "2006-01-02"
+ case internal.LocalTime:
+ format = "15:04:05.999999999"
+ }
+ switch v.Location() {
+ default:
+ enc.wf(v.Format(format))
+ case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+ enc.wf(v.In(time.UTC).Format(format))
+ }
return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
+ case encoding.TextMarshaler:
+ // Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
@@ -171,32 +209,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
}
return
}
+
switch rv.Kind() {
+ case reflect.String:
+ enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+ }
case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+ }
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
+ case reflect.Struct:
+ enc.eStruct(nil, rv, true)
+ case reflect.Map:
+ enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
+ encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
}
}
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
@@ -230,16 +285,14 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if isNil(trv) {
continue
}
- panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
- enc.eMapOrStruct(key, trv)
+ enc.eMapOrStruct(key, trv, false)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
@@ -249,21 +302,22 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
- enc.eMapOrStruct(key, rv)
+ enc.eMapOrStruct(key, rv, false)
}
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
- enc.eMap(key, rv)
+ enc.eMap(key, rv, inline)
case reflect.Struct:
- enc.eStruct(key, rv)
+ enc.eStruct(key, rv, inline)
default:
+ // Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
@@ -281,57 +335,76 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
}
}
- var writeMapKeys = func(mapKeys []string) {
+ var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
+ for i, mapKey := range mapKeys {
+ val := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(val) {
continue
}
- enc.encode(key.add(mapKey), mrv)
+
+ if inline {
+ enc.writeKeyValue(Key{mapKey}, val, true)
+ if trailC || i != len(mapKeys)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(mapKey), val)
+ }
}
}
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
+
+ if inline {
+ enc.wf("{")
+ }
+ writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+ writeMapKeys(mapKeysSub, false)
+ if inline {
+ enc.wf("}")
+ }
}
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
+ // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ //
+ // Fields is a [][]int: for fieldsDirect this always has one entry (the
+ // struct index). For fieldsSub it contains two entries: the parent field
+ // index from tv, and the field indexes for the fields of the sub.
+ var (
+ rt = rv.Type()
+ fieldsDirect, fieldsSub [][]int
+ addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ )
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
+ if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
continue
}
+
frv := rv.Field(i)
+
+ // Treat anonymous struct fields with tag names as though they are
+ // not anonymous, like encoding/json does.
+ //
+ // Non-struct anonymous fields use the normal encoding logic.
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
+ addFields(t, frv, append(start, f.Index...))
continue
}
case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
+ if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
+ addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
}
continue
}
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
}
}
@@ -344,35 +417,49 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
}
addFields(rt, rv, nil)
- var writeFields = func(fields [][]int) {
+ writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
+ fieldType := rt.FieldByIndex(fieldIndex)
+ fieldVal := rv.FieldByIndex(fieldIndex)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
}
- opts := getOptions(sft.Tag)
+ opts := getOptions(fieldType.Tag)
if opts.skip {
continue
}
- keyName := sft.Name
+ keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && isEmpty(sf) {
+ if opts.omitempty && isEmpty(fieldVal) {
continue
}
- if opts.omitzero && isZero(sf) {
+ if opts.omitzero && isZero(fieldVal) {
continue
}
- enc.encode(key.add(keyName), sf)
+ if inline {
+ enc.writeKeyValue(Key{keyName}, fieldVal, true)
+ if fieldIndex[0] != len(fields)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(keyName), fieldVal)
+ }
}
}
+
+ if inline {
+ enc.wf("{")
+ }
writeFields(fieldsDirect)
writeFields(fieldsSub)
+ if inline {
+ enc.wf("}")
+ }
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
@@ -411,13 +498,26 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
- case TextMarshaler:
+ case encoding.TextMarshaler:
return tomlString
default:
+ // Someone used a pointer receiver: we can make it work for pointer
+ // values.
+ if rv.CanAddr() {
+ _, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ }
return tomlHash
}
default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
+ _, ok := rv.Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+ panic("") // Need *some* return value
}
}
@@ -430,29 +530,18 @@ func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
+ /// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
+ if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
}
}
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
+
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
}
return firstType
}
@@ -511,14 +600,20 @@ func (enc *Encoder) newline() {
}
}
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+// Write a key/value pair:
+//
+// key = <any value>
+//
+// If inline is true it won't add a newline at the end.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)
}
- panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
- enc.newline()
+ if !inline {
+ enc.newline()
+ }
}
func (enc *Encoder) wf(format string, v ...interface{}) {
@@ -553,16 +648,3 @@ func isNil(rv reflect.Value) bool {
return false
}
}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
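The encode.go changes above add inline-table output, NaN/Inf float handling, and the local date/time formats, but basic usage of the Encoder is unchanged. A minimal sketch with an assumed struct:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type site struct {
	Title   string
	Ratio   float64
	Servers map[string]string
}

func main() {
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	enc.Indent = "    " // Indent controls a single indentation level.

	err := enc.Encode(site{
		Title:   "example",
		Ratio:   0.5,
		Servers: map[string]string{"alpha": "10.0.0.1"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Keys without sub-tables (Title, Ratio) come first, then [Servers].
	fmt.Print(buf.String())
}
```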
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd60..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d04..000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod
new file mode 100644
index 000000000..82989481d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/BurntSushi/toml
+
+go 1.16
diff --git a/vendor/github.com/BurntSushi/toml/go.sum b/vendor/github.com/BurntSushi/toml/go.sum
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.sum
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+ localOffset = func() int { _, o := time.Now().Zone(); return o }()
+ LocalDatetime = time.FixedZone("datetime-local", localOffset)
+ LocalDate = time.FixedZone("date-local", localOffset)
+ LocalTime = time.FixedZone("time-local", localOffset)
+)
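internal/tz.go models TOML's local datetime, date, and time values as time.Time values in specially named fixed zones. Assuming the parser (whose diff is not shown in this excerpt) places local datetimes in those zones, as the encoder's Location switch above implies, a decoded value can be recognized by its Location name; the blob here is illustrative.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	// A local datetime: no timezone offset in the TOML source.
	blob := "created = 2021-05-01T09:30:00\n"

	var v struct{ Created time.Time }
	if _, err := toml.Decode(blob, &v); err != nil {
		log.Fatal(err)
	}

	// Expected to print the fixed zone named "datetime-local" defined in
	// internal/tz.go above.
	fmt.Println(v.Created.Location())
}
```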
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index e0a742a88..adc4eb5d5 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "reflect"
+ "runtime"
"strings"
"unicode"
"unicode/utf8"
@@ -29,6 +31,7 @@ const (
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
+ itemKeyEnd
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
@@ -64,9 +67,9 @@ type lexer struct {
state stateFn
items chan item
- // Allow for backing up up to three runes.
+ // Allow for backing up up to four runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
+ prevWidths [4]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
@@ -93,6 +96,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
+ //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@@ -137,7 +141,7 @@ func (lx *lexer) emitTrim(typ itemType) {
func (lx *lexer) next() (r rune) {
if lx.atEOF {
- panic("next called after EOF")
+ panic("BUG in lexer: next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
@@ -147,12 +151,19 @@ func (lx *lexer) next() (r rune) {
if lx.input[lx.pos] == '\n' {
lx.line++
}
+ lx.prevWidths[3] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
+ if lx.nprev < 4 {
lx.nprev++
}
+
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ if r == utf8.RuneError {
+ lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
+ return utf8.RuneError
+ }
+
lx.prevWidths[0] = w
lx.pos += w
return r
@@ -163,18 +174,19 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
-// backup steps back one rune. Can be called only twice between calls to next.
+// backup steps back one rune. Can be called 4 times between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
- panic("backed up too far")
+ panic("BUG in lexer: backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
+ lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
@@ -269,8 +281,9 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
+ return lx.errorf(
+ "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+ r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -297,8 +310,9 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
+ return lx.errorf(
+ "expected end of table array name delimiter %q, but got %q instead",
+ arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@@ -308,30 +322,17 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
+ return lx.errorf("unexpected end of table name (table names cannot be empty)")
case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
+ return lx.errorf("unexpected table separator (table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
+ return lexQuotedName
default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
+ lx.push(lexTableNameEnd)
+ return lexBareName
}
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
@@ -347,63 +348,101 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
}
}
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+ r := lx.next()
switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case r == stringStart:
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == rawStringStart:
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected value")
default:
+ return lx.errorf("expected value but found %q instead", r)
+ }
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '=': key name appears blank")
+ case r == '.':
+ return lx.errorf("unexpected '.': keys cannot start with a '.'")
+ case r == stringStart || r == rawStringStart:
lx.ignore()
+ fallthrough
+ default: // Bare key
lx.emit(itemKeyStart)
- return lexBareKey
+ return lexKeyNameStart
}
}
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
+func lexKeyNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '='")
+ case r == '.':
+ return lx.errorf("unexpected '.'")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexKeyEnd)
+ return lexQuotedName
default:
- return lx.errorf("bare keys cannot contain %q", r)
+ lx.push(lexKeyEnd)
+ return lexBareName
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+ case r == '.':
+ lx.ignore()
+ return lexKeyNameStart
+ case r == '=':
+ lx.emit(itemKeyEnd)
+ return lexSkip(lx, lexValue)
default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
+ return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
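For illustration only (not part of the patch): a minimal sketch of the dotted-key syntax the key states above accept, going through the package's exported toml.Decode; the sample document and variable names are made up.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// Dotted keys: bare parts ('a', 'b') and quoted parts ('"c.d"', '"google.com"').
	doc := `
a.b."c.d" = 1
site."google.com" = true
`
	var m map[string]interface{}
	if _, err := toml.Decode(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // roughly: map[a:map[b:map[c.d:1]] site:map[google.com:true]]
}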
@@ -450,10 +489,15 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the "'"
return lexRawString
- case '+', '-':
- return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
+ case 'i', 'n':
+ if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+ lx.emit(itemFloat)
+ return lx.pop()
+ }
+ case '-', '+':
+ return lexDecimalNumberStart
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
@@ -463,6 +507,9 @@ func lexValue(lx *lexer) stateFn {
lx.backup()
return lexBool
}
+ if r == eof {
+ return lx.errorf("unexpected EOF; expected value")
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -507,9 +554,8 @@ func lexArrayValueEnd(lx *lexer) stateFn {
return lexArrayEnd
}
return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
+ "expected a comma or array terminator %q, but got %s instead",
+ arrayEnd, runeOrEOF(r))
}
// lexArrayEnd finishes the lexing of an array.
@@ -546,8 +592,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
+ switch r := lx.next(); {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
@@ -557,12 +602,25 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
return lexCommentStart
case r == comma:
lx.ignore()
+ lx.skip(isWhitespace)
+ if lx.peek() == '}' {
+ return lx.errorf("trailing comma not allowed in inline tables")
+ }
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
+ default:
+ return lx.errorf(
+ "expected a comma or an inline table terminator %q, but got %s instead",
+ inlineTableEnd, runeOrEOF(r))
+ }
+}
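For illustration only (not part of the patch): a small sketch of the trailing-comma rule enforced above, using the exported toml.Decode; the struct and key names are illustrative.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ T map[string]int }
	if _, err := toml.Decode(`T = {a = 1, b = 2}`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.T["b"]) // 2

	// A comma directly before '}' is rejected.
	_, err := toml.Decode(`T = {a = 1,}`, &v)
	fmt.Println(err != nil) // true
}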
+
+func runeOrEOF(r rune) string {
+ if r == eof {
+ return "end of file"
}
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
+ return "'" + string(r) + "'"
}
// lexInlineTableEnd finishes the lexing of an inline table.
@@ -579,7 +637,9 @@ func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"'`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
@@ -598,19 +658,40 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
case eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected '"""'`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineString
case '\\':
return lexMultilineStringEscape
case stringEnd:
+ /// Found " → try to read two more "".
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
- lx.backup()
+ /// Peek ahead: the string can contain " and "", including at the
+ /// end: """str"""""
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == stringEnd {
+ /// Check whether we already lexed five '"' quotes; if so this one makes
+ /// six, which is too many.
+ if strings.HasSuffix(lx.current(), `"""""`) {
+ return lx.errorf(`unexpected '""""""'`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineString
+ }
+
+ lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
- lx.next()
+ lx.next() /// Read over """ again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -619,6 +700,10 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup()
}
}
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
return lexMultilineString
}
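For illustration only (not part of the patch): with the peek-ahead above, up to two unescaped '"' characters may sit immediately before the closing delimiter of a multi-line basic string. A minimal sketch with made-up names:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }
	// """str""""" is the five-character string str"" followed by the closing """.
	if _, err := toml.Decode(`s = """str"""""`, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "str\"\""
}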
@@ -628,7 +713,9 @@ func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected "'"`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
@@ -645,17 +732,38 @@ func lexRawString(lx *lexer) stateFn {
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
+ r := lx.next()
+ switch r {
case eof:
- return lx.errorf("unexpected EOF")
+ return lx.errorf(`unexpected EOF; expected "'''"`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineRawString
case rawStringEnd:
+ /// Found ' → try to read two more ''.
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
- lx.backup()
+ /// Peek ahead: the string can contain ' and '', including at the
+ /// end: '''str'''''
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == rawStringEnd {
+ /// Check whether we already lexed five "'" quotes; if so this one makes
+ /// six, which is too many.
+ if strings.HasSuffix(lx.current(), "'''''") {
+ return lx.errorf(`unexpected "''''''"`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineRawString
+ }
+
+ lx.backup() /// backup: don't include the ''' in the item.
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
- lx.next()
+ lx.next() /// Read over ''' again and discard it.
lx.next()
lx.next()
lx.ignore()
@@ -664,6 +772,10 @@ func lexMultilineRawString(lx *lexer) stateFn {
lx.backup()
}
}
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
return lexMultilineRawString
}
@@ -694,6 +806,10 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '"':
fallthrough
+ case ' ', '\t':
+ // Inside """ .. """ strings you can use \ to escape newlines, and any
+ // amount of whitespace can be between the \ and \n.
+ fallthrough
case '\\':
return lx.pop()
case 'u':
@@ -701,8 +817,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
+ return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
@@ -711,8 +826,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
+ return lx.errorf(
+ `expected four hexadecimal digits after '\u', but got %q instead`,
+ lx.current())
}
}
return lx.pop()
@@ -723,28 +839,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
+ return lx.errorf(
+ `expected eight hexadecimal digits after '\U', but got %q instead`,
+ lx.current())
}
}
return lx.pop()
}
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
+ case '0':
+ return lexBaseNumberOrDate
}
- return lx.errorf("expected a digit but got %q", r)
+
+ if !isDigit(r) {
+ // The only way to reach this state is if the value starts
+ // with a digit, so specifically treat anything else as an
+ // error.
+ return lx.errorf("expected a digit but got %q", r)
+ }
+
+ return lexNumberOrDate
}
// lexNumberOrDate consumes either an integer, float or datetime.
@@ -754,10 +875,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lexNumberOrDate
}
switch r {
- case '-':
+ case '-', ':':
return lexDatetime
case '_':
- return lexNumber
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
}
@@ -775,41 +896,156 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime
}
switch r {
- case '-', 'T', ':', '.', 'Z', '+':
+ case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
return lexDatetime
}
lx.backup()
- lx.emit(itemDatetime)
+ lx.emitTrim(itemDatetime)
return lx.pop()
}
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
+ if isHexadecimal(r) {
+ return lexHexInteger
+ }
+ switch r {
+ case '_':
+ return lexHexInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isOctal(r) {
+ return lexOctalInteger
+ }
+ switch r {
+ case '_':
+ return lexOctalInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isBinary(r) {
+ return lexBinaryInteger
+ }
+ switch r {
+ case '_':
+ return lexBinaryInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
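For illustration only (not part of the patch): the three prefixed integer forms these states lex, decoded through the exported toml.Decode; the keys and struct fields are made up. Underscores between digits are permitted in all three bases.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ Hex, Oct, Bin int64 }
	_, err := toml.Decode(`
hex = 0xDEAD_BEEF
oct = 0o755
bin = 0b1101_0101
`, &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Hex, v.Oct, v.Bin) // 3735928559 493 213
}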
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDecimalNumber
+ }
+ switch r {
+ case '.', 'e', 'E':
+ return lexFloat
+ case '_':
+ return lexDecimalNumber
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a sign.
+// It assumes the sign has already been consumed. Values which start with a sign
+// are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+ r := lx.next()
+
+ // Special error cases to give users better error messages
+ switch r {
+ case 'i':
+ if !lx.accept('n') || !lx.accept('f') {
+ return lx.errorf("invalid float: '%s'", lx.current())
}
- return lx.errorf("expected a digit but got %q", r)
+ lx.emit(itemFloat)
+ return lx.pop()
+ case 'n':
+ if !lx.accept('a') || !lx.accept('n') {
+ return lx.errorf("invalid float: '%s'", lx.current())
+ }
+ lx.emit(itemFloat)
+ return lx.pop()
+ case '0':
+ p := lx.peek()
+ switch p {
+ case 'b', 'o', 'x':
+ return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+ }
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+
+ if isDigit(r) {
+ return lexDecimalNumber
}
- return lexNumber
+
+ return lx.errorf("expected a digit but got %q", r)
}
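For illustration only (not part of the patch): a sketch of the signed values this state accepts, including the special inf and nan floats; key and field names are made up.

package main

import (
	"fmt"
	"math"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ A, B, C float64 }
	_, err := toml.Decode(`
a = +inf
b = -nan
c = -0.5
`, &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(math.IsInf(v.A, 1), math.IsNaN(v.B), v.C) // true true -0.5
}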
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
r := lx.next()
+ // Note: All datetimes start with at least two digits, so we don't
+ // handle date characters (':', '-', etc.) here.
if isDigit(r) {
- return lexNumber
+ return lexNumberOrDate
}
switch r {
case '_':
- return lexNumber
+ // Can only be decimal, because there can't be an underscore
+ // between the '0' and the base designator, and dates can't
+ // contain underscores.
+ return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
+ case 'b':
+ r = lx.peek()
+ if !isBinary(r) {
+ lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+ }
+ return lexBinaryInteger
+ case 'o':
+ r = lx.peek()
+ if !isOctal(r) {
+ lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+ }
+ return lexOctalInteger
+ case 'x':
+ r = lx.peek()
+ if !isHexadecimal(r) {
+ lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ }
+ return lexHexInteger
}
lx.backup()
@@ -867,21 +1103,22 @@ func lexCommentStart(lx *lexer) stateFn {
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
+ switch r := lx.next(); {
+ case isNL(r) || r == eof:
+ lx.backup()
lx.emit(itemText)
return lx.pop()
+ case isControl(r):
+ return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+ default:
+ return lexComment
}
- lx.next()
- return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
+ lx.ignore()
+ return nextState
}
// isWhitespace returns true if `r` is a whitespace character according
@@ -894,6 +1131,16 @@ func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
+// isControl reports whether r is a control character, excluding \t, \r, and \n.
+func isControl(r rune) bool {
+ switch r {
+ case '\t', '\r', '\n':
+ return false
+ default:
+ return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+ }
+}
+
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
@@ -904,6 +1151,14 @@ func isHexadecimal(r rune) bool {
(r >= 'A' && r <= 'F')
}
+func isOctal(r rune) bool {
+ return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+ return r == '0' || r == '1'
+}
+
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
@@ -912,6 +1167,17 @@ func isBareKeyChar(r rune) bool {
r == '-'
}
+func (s stateFn) String() string {
+ name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+ if i := strings.LastIndexByte(name, '.'); i > -1 {
+ name = name[i+1:]
+ }
+ if s == nil {
+ name = "<nil>"
+ }
+ return name + "()"
+}
+
func (itype itemType) String() string {
switch itype {
case itemError:
@@ -938,12 +1204,18 @@ func (itype itemType) String() string {
return "TableEnd"
case itemKeyStart:
return "KeyStart"
+ case itemKeyEnd:
+ return "KeyEnd"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
+ case itemInlineTableStart:
+ return "InlineTableStart"
+ case itemInlineTableEnd:
+ return "InlineTableEnd"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 50869ef92..d9ae5db94 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -1,12 +1,14 @@
package toml
import (
+ "errors"
"fmt"
"strconv"
"strings"
"time"
- "unicode"
"unicode/utf8"
+
+ "github.com/BurntSushi/toml/internal"
)
type parser struct {
@@ -14,39 +16,54 @@ type parser struct {
types map[string]tomlType
lx *lexer
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
+ ordered []Key // List of keys in the order that they appear in the TOML data.
+ context Key // Full key for the current hash in scope.
+ currentKey string // Base key name for everything except hashes.
+ approxLine int // Rough approximation of line number
+ implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
}
-type parseError string
+// ParseError is used when a file can't be parsed: for example invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+ Message string
+ Line int
+ LastKey string
+}
-func (pe parseError) Error() string {
- return string(pe)
+func (pe ParseError) Error() string {
+ return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ pe.Line, pe.LastKey, pe.Message)
}
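For illustration only (not part of the patch): because ParseError is now exported, callers can type-assert the error returned by Decode to recover the line number and last parsed key. A minimal sketch:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]interface{}
	_, err := toml.Decode("x = 1\nx = 2\n", &m)
	if pe, ok := err.(toml.ParseError); ok {
		// e.g. 2 x Key 'x' has already been defined.
		fmt.Println(pe.Line, pe.LastKey, pe.Message)
	}
}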
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
- if err, ok = r.(parseError); ok {
+ if err, ok = r.(ParseError); ok {
return
}
panic(r)
}
}()
+ // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+ // which mangles stuff.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ data = data[2:]
+ }
+
+ // Examine the first few bytes for NULL bytes; this probably means it's a
+ // UTF-16 file (ASCII characters encoded as UTF-16 have a NULL byte in one of
+ // the two byte positions). Again, do this here to avoid having to deal with
+ // UTF-8/16 stuff in the lexer.
+ ex := 6
+ if len(data) < 6 {
+ ex = len(data)
+ }
+ if strings.ContainsRune(data[:ex], 0) {
+ return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
+ }
+
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
@@ -66,13 +83,17 @@ func parse(data string) (p *parser, err error) {
}
func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
+ msg := fmt.Sprintf(format, v...)
+ panic(ParseError{
+ Message: msg,
+ Line: p.approxLine,
+ LastKey: p.current(),
+ })
}
func (p *parser) next() item {
it := p.lx.nextItem()
+ //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError {
p.panicf("%s", it.val)
}
@@ -97,44 +118,63 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
- case itemCommentStart:
+ case itemCommentStart: // # ..
p.approxLine = item.line
p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemTableStart: // [ .. ]
+ name := p.next()
+ p.approxLine = name.line
var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemTableEnd, kg.typ)
+ p.assertEqual(itemTableEnd, name.typ)
- p.establishContext(key, false)
+ p.addContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
+ case itemArrayTableStart: // [[ .. ]]
+ name := p.next()
+ p.approxLine = name.line
var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
+ for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
}
- p.assertEqual(itemArrayTableEnd, kg.typ)
+ p.assertEqual(itemArrayTableEnd, name.typ)
- p.establishContext(key, true)
+ p.addContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
+ case itemKeyStart: // key = ..
+ outerContext := p.context
+ /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
+
+ /// Set value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
+
+ /// Remove the context we added (preserving any context from [tbl] lines).
+ p.context = outerContext
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
@@ -148,180 +188,253 @@ func (p *parser) keyString(it item) string {
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
+ s, _ := p.value(it, false)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
}
+ panic("unreachable")
}
+var datetimeRepl = strings.NewReplacer(
+ "z", "Z",
+ "t", "T",
+ " ", "T")
+
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemInteger:
+ return p.valueInteger(it)
+ case itemFloat:
+ return p.valueFloat(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
+ default:
+ p.bug("Expected boolean value, but got '%s'.", it.val)
}
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
+ case itemDatetime:
+ return p.valueDatetime(it)
+ case itemArray:
+ return p.valueArray(it)
+ case itemInlineTableStart:
+ return p.valueInlineTable(it, parentIsArray)
+ default:
+ p.bug("Unexpected value type: %s", it.typ)
+ }
+ panic("unreachable")
+}
+
+func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
+ }
+ if numHasLeadingZero(it.val) {
+ p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
+ }
+
+ num, err := strconv.ParseInt(it.val, 0, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
}
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
}
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
}
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
+ }
+ if len(parts) > 0 && numHasLeadingZero(parts[0]) {
+ p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ val = "nan"
+ }
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
}
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+var dtTypes = []struct {
+ fmt string
+ zone *time.Location
+}{
+ {time.RFC3339Nano, time.Local},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
+ {"2006-01-02", internal.LocalDate},
+ {"15:04:05.999999999", internal.LocalTime},
+}
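For illustration only (not part of the patch): the four datetime shapes covered by dtTypes, all of which decode into time.Time (the Location distinguishes the local forms). The keys below are made up.

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ Odt, Ldt, Ld, Lt time.Time }
	_, err := toml.Decode(`
odt = 1979-05-27T07:32:00Z
ldt = 1979-05-27T07:32:00
ld  = 1979-05-27
lt  = 07:32:00
`, &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Odt.UTC(), v.Ld.Format("2006-01-02"), v.Lt.Format("15:04:05"))
}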
+
+func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+ it.val = datetimeRepl.Replace(it.val)
+ var (
+ t time.Time
+ ok bool
+ err error
+ )
+ for _, dt := range dtTypes {
+ t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
+ if err == nil {
+ ok = true
+ break
}
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+}
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
+func (p *parser) valueArray(it item) (interface{}, tomlType) {
+ p.setType(p.currentKey, tomlArray)
+
+ // p.setType(p.currentKey, typ)
+ var (
+ array []interface{}
+ types []tomlType
+ )
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it, true)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, tomlArray
+}
+
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ prevContext := p.context
+ p.currentKey = ""
+
+ p.addImplicit(p.context)
+ p.addContext(p.context, parentIsArray)
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
+ /// Loop over all table key/value pairs.
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
}
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
- }
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
+ /// Read all key parts.
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
}
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
+
+ /// Set the value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[p.currentKey] = val
+
+ /// Restore context.
+ p.context = prevContext
}
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+ if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
+ return true
+ }
+ if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+ return true
+ }
+ return false
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
+ switch s {
+ case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+ return true
+ }
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
- accept = false
- continue
}
- accept = true
+
+ // isHexadecimal is a superset of all the permissible characters
+ // surrounding an underscore.
+ accept = isHexadecimal(r)
}
return accept
}
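For illustration only (not part of the patch): a small sketch of the underscore and leading-zero checks above, exercised through the exported toml.Decode; key and field names are made up.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ N int64 }
	if _, err := toml.Decode(`n = 1_000_000`, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.N) // 1000000

	// Underscores must be surrounded by digits, and decimal integers may not
	// have leading zeroes.
	for _, bad := range []string{`n = 1__000`, `n = _1000`, `n = 1000_`, `n = 010`} {
		_, err := toml.Decode(bad, &v)
		fmt.Println(err != nil) // true for each
	}
}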
@@ -338,13 +451,12 @@ func numPeriodsOK(s string) bool {
return !period
}
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
+func (p *parser) addContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
@@ -383,7 +495,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
+ hashContext[k] = make([]map[string]interface{}, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
@@ -391,8 +503,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
+ p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -400,15 +511,22 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1])
}
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+}
+
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
+ var (
+ tmpHash interface{}
+ ok bool
+ hash = p.mapping
+ keyContext Key
+ )
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
@@ -422,24 +540,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}:
hash = t
default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
+ p.panicf("Key '%s' has already been defined.", keyContext)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ // Normally redefining keys isn't allowed, but the key could have been
+ // defined implicitly and it's allowed to be redefined concretely. (See
+ // the `valid/implicit-and-explicit-after.toml` in toml-test)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
+ if p.isArray(keyContext) {
+ p.removeImplicit(keyContext)
+ hash[key] = value
+ return
+ }
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
@@ -449,6 +569,7 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
+
hash[key] = value
}
@@ -468,21 +589,15 @@ func (p *parser) setType(key string, typ tomlType) {
p.types[keyContext.String()] = typ
}
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
+func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
+func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
+func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+ p.addImplicit(key)
+ p.addContext(key, false)
}
// current returns the full key name of the current context.
@@ -497,20 +612,54 @@ func (p *parser) current() string {
}
func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
+ if len(s) > 0 && s[0] == '\n' {
+ return s[1:]
+ }
+ if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+ return s[2:]
}
- return s[1:]
+ return s
}
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+ split := strings.Split(s, "\n")
+ if len(split) < 1 {
+ return s
+ }
+
+ escNL := false // Keep track of whether the last non-blank line was escaped.
+ for i, line := range split {
+ line = strings.TrimRight(line, " \t\r")
+
+ if len(line) == 0 || line[len(line)-1] != '\\' {
+ split[i] = strings.TrimRight(split[i], "\r")
+ if !escNL && i != len(split)-1 {
+ split[i] += "\n"
+ }
+ continue
+ }
+
+ escBS := true
+ for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+ escBS = !escBS
+ }
+ if escNL {
+ line = strings.TrimLeft(line, " \t\r")
+ }
+ escNL = !escBS
+
+ if escBS {
+ split[i] += "\n"
+ continue
+ }
+
+ split[i] = line[:len(line)-1] // Remove \
+ if len(split)-1 > i {
+ split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
}
}
- return strings.Join(esc, "")
+ return strings.Join(split, "")
}
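For illustration only (not part of the patch): a sketch of the line-ending backslash behaviour the function above implements, using the exported toml.Decode; the document string and names are made up.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }
	// A '\' at the end of a line inside """...""" removes the newline and the
	// leading whitespace of the following line.
	doc := "s = \"\"\"\nThe quick brown \\\n   fox jumps over \\\n   the lazy dog.\"\"\""
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.S) // The quick brown fox jumps over the lazy dog.
}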
func (p *parser) replaceEscapes(str string) string {
@@ -533,6 +682,9 @@ func (p *parser) replaceEscapes(str string) string {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
+ case ' ', '\t':
+ p.panicf("invalid escape: '\\%c'", s[r])
+ return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
@@ -585,8 +737,3 @@ func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
}
return rune(hex)
}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be0..000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
index c73f8afc1..d56aa80fa 100644
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 73ab4fc11..860d258b2 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -98,7 +98,7 @@ smoke_task:
# the git-validate tool which are difficult to debug and fix.
skip: $CIRRUS_PR == ''
- timeout_in: 10m
+ timeout_in: 30m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index 591ba831c..8b92ddbe1 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -221,6 +221,316 @@
* Reset upstream branch to dev version
* If destination does not exists, do not throw error
+## v1.22.0 (2021-08-02)
+ c/image, c/storage, c/common vendor before Podman 3.3 release
+ WIP: tests: new assert()
+ Proposed patch for 3399 (shadowutils)
+ Fix handling of --restore shadow-utils
+ build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0
+ runtime-flag (debug) test: handle old & new runc
+ build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0
+ Allow dst and destination for target in secret mounts
+ Multi-arch: Always push updated version-tagged img
+ Add a few tests on cgroups V2
+ imagebuildah.stageExecutor.prepare(): remove pseudonym check
+ refine dangling filter
+ Chown with environment variables not set should fail
+ Just restore protections of shadow-utils
+ build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1
+ Remove specific kernel version number requirement from install.md
+ Multi-arch image workflow: Make steps generic
+ chroot: fix environment value leakage to intermediate processes
+ Update nix pin with `make nixpkgs`
+ buildah source - create and manage source images
+ Update cirrus-cron notification GH workflow
+ Reuse code from containers/common/pkg/parse
+ Cirrus: Freshen VM images
+ build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6
+ Fix excludes exception begining with / or ./
+ Fix syntax for --manifest example
+ build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0
+ vendor containers/common@main
+ Cirrus: Drop dependence on fedora-minimal
+ Adjust conformance-test error-message regex
+ Workaround appearance of differing debug messages
+ Cirrus: Install docker from package cache
+ build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2
+ Switch rusagelogfile to use options.Out
+ build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5
+ Turn stdio back to blocking when command finishes
+ Add support for default network creation
+ Cirrus: Updates for master->main rename
+ Change references from master to main
+ Add `--env` and `--workingdir` flags to run command
+ build(deps): bump github.com/opencontainers/runc
+ [CI:DOCS] buildah bud: spelling --ignore-file requires parameter
+ [CI:DOCS] push/pull: clarify supported transports
+ Remove unused function arguments
+ Create mountOptions for mount command flags
+ Extract version command implementation to function
+ Add --json flags to `mount` and `version` commands
+ build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3
+ build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1
+ copier.Put(): set xattrs after ownership
+ buildah add/copy: spelling
+ build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0
+ buildah copy and buildah add should support .containerignore
+ Remove unused util.StartsWithValidTransport
+ Fix documentation of the --format option of buildah push
+ Don't use alltransports.ParseImageName with known transports
+ build(deps): bump github.com/containers/image/v5 from 5.13.0 to 5.13.1
+ man pages: clarify `rmi` removes dangling parents
+ tests: make it easer to override the location of the copy helper
+ build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0
+ [CI:DOCS] Fix links to c/image master branch
+ imagebuildah: use the specified logger for logging preprocessing warnings
+ Fix copy into workdir for a single file
+ Fix docs links due to branch rename
+ Update nix pin with `make nixpkgs`
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3
+ build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2
+ build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6
+ build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2
+ build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12
+ build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4
+ fix(docs): typo
+ Move to v1.22.0-dev
+ Fix handling of auth.json file while in a user namespace
+ Add rusage-logfile flag to optionally send rusage to a file
+ imagebuildah: redo step logging
+ build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3
+ build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1
+ Add volumes to make running buildah within a container easier
+ build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0
+ Add and use a "copy" helper instead of podman load/save
+ Bump github.com/containers/common from 0.38.4 to 0.39.0
+ containerImageRef/containerImageSource: don't buffer uncompressed layers
+ containerImageRef(): squashed images have no parent images
+ Sync. workflow across skopeo, buildah, and podman
+ Bump github.com/containers/storage from 1.31.1 to 1.31.2
+ Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95
+ Bump to v1.21.1-dev [NO TESTS NEEDED]
+
+## v1.21.0 (2021-05-19)
+ Don't blow up if cpp detects errors
+ Vendor in containers/common v0.38.4
+ Remove 'buildah run --security-opt' from completion
+ update c/common
+ Fix handling of --default-mounts-file
+ update vendor of containers/storage v1.31.1
+ Bump github.com/containers/storage from 1.30.3 to 1.31.0
+ Send logrus messages back to caller when building
+ github: Fix bad repo. ref in workflow config
+ Check earlier for bad image tags name
+ buildah bud: fix containers/podman/issues/10307
+ Bump github.com/containers/storage from 1.30.1 to 1.30.3
+ Cirrus: Support [CI:DOCS] test skipping
+ Notification email for cirrus-cron build failures
+ Bump github.com/opencontainers/runc from 1.0.0-rc93 to 1.0.0-rc94
+ Fix race condition
+ Fix copy race while walking paths
+ Preserve ownership of lower directory when doing an overlay mount
+ Bump github.com/onsi/gomega from 1.11.0 to 1.12.0
+ Update nix pin with `make nixpkgs`
+ codespell cleanup
+ Multi-arch github-action workflow unification
+ Bump github.com/containers/image/v5 from 5.11.1 to 5.12.0
+ Bump github.com/onsi/ginkgo from 1.16.1 to 1.16.2
+ imagebuildah: ignore signatures when tagging images
+ update to latest libimage
+ Bump github.com/containers/common from 0.37.0 to 0.37.1
+ Bump github.com/containers/storage from 1.30.0 to 1.30.1
+ Upgrade to GitHub-native Dependabot
+ Document location of auth.json file if XDG_RUNTIME_DIR is not set
+ run.bats: fix flake in run-user test
+ Cirrus: Update F34beta -> F34
+ pr-should-include-tests: try to make work in buildah
+ runUsingRuntime: when relaying error from the runtime, mention that
+ Run(): avoid Mkdir() into the rootfs
+ imagebuildah: replace archive with chrootarchive
+ imagebuildah.StageExecutor.volumeCacheSaveVFS(): set up bind mounts
+ conformance: use :Z with transient mounts when SELinux is enabled
+ bud.bats: fix a bats warning
+ imagebuildah: create volume directories when using overlays
+ imagebuildah: drop resolveSymlink()
+ namespaces test - refactoring and cleanup
+ Refactor 'idmapping' system test
+ Cirrus: Update Ubuntu images to 21.04
+ Tiny fixes in bud system tests
+ Add compabitility wrappers for removed packages
+ Fix expected message at pulling image
+ Fix system tests of 'bud' subcommand
+ [CI:DOCS] Update steps for CentOS runc users
+ Add support for secret mounts
+ Add buildah manifest rm command
+ restore push/pull and util API
+ [CI:DOCS] Remove older distro docs
+ Rename rhel secrets to subscriptions
+ vendor in openshift/imagebuilder
+ Remove buildah bud --loglevel ...
+ use new containers/common/libimage package
+ Fix copier when using globs
+ Test namespace flags of 'bud' subcommand
+ Add system test of 'bud' subcommand
+ Output names of multiple tags in buildah bud
+ push to docker test: don't get fooled by podman
+ copier: add Remove()
+ build(deps): bump github.com/containers/image/v5 from 5.10.5 to 5.11.1
+ Restore log timestamps
+ Add system test of 'buildah help' with a tiny fix
+ tests: copy.bats: fix infinite hang
+ Do not force hard code to crun in rootless mode
+ build(deps): bump github.com/openshift/imagebuilder from 1.2.0 to 1.2.1
+ build(deps): bump github.com/containers/ocicrypt from 1.1.0 to 1.1.1
+ build(deps): bump github.com/containers/common from 0.35.4 to 0.36.0
+ Fix arg missing warning in bud
+ Check without flag in 'from --cgroup-parent' test
+ Minor fixes to Buildah as a library tutorial documentation
+ Add system test of 'buildah version' for packaged buildah
+ Add a few system tests of 'buildah from'
+ Log the final error with %+v at logging level "trace"
+ copier: add GetOptions.NoCrossDevice
+ Update nix pin with `make nixpkgs`
+ Bump to v1.20.2-dev
+
+## v1.20.1 (2021-04-13)
+ Run container with isolation type set at 'from'
+ bats helpers.bash - minor refactoring
+ Bump containers/storage vendor to v1.29.0
+ build(deps): bump github.com/onsi/ginkgo from 1.16.0 to 1.16.1
+ Cirrus: Update VMs w/ F34beta
+ CLI add/copy: add a --from option
+ build(deps): bump github.com/onsi/ginkgo from 1.15.2 to 1.16.0
+ Add authentication system tests for 'commit' and 'bud'
+ fix local image lookup for custom platform
+ Double-check existence of OCI runtimes
+ Cirrus: Make use of shared get_ci_vm container
+ Add system tests of "buildah run"
+ Update nix pin with `make nixpkgs`
+ Remove some stuttering on returns errors
+ Setup alias for --tty to --terminal
+ Add conformance tests for COPY /...
+ Put a few more minutes on the clock for the CI conformance test
+ Add a conformance test for COPY --from $symlink
+ Add conformance tests for COPY ""
+ Check for symlink in builtin volume
+ Sort all mounts by destination directory
+ System-test cleanup
+ Export parse.Platform string to be used by podman-remote
+ blobcache: fix sequencing error
+ build(deps): bump github.com/containers/common from 0.35.3 to 0.35.4
+ Fix URL in demos/buildah_multi_stage.sh
+ Add a few system tests
+ [NO TESTS NEEDED] Use --recurse-modules when building git context
+ Bump to v1.20.1-dev
+
+## v1.20.0 (2021-03-25)
+ * vendor in containers/storage v1.28.1
+ * build(deps): bump github.com/containers/common from 0.35.2 to 0.35.3
+ * tests: prefetch: use buildah, not podman, for pulls
+ * Use faster way to check image tag existence during multi-arch build
+ * Add information about multi-arch images to the Readme
+ * COPY --chown: expand the conformance test
+ * pkg/chrootuser: use a bufio.Scanner
+ * [CI:DOCS] Fix rootful typo in docs
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.1 to 1.15.2
+ * Add documentation and testing for .containerignore
+ * build(deps): bump github.com/sirupsen/logrus from 1.8.0 to 1.8.1
+ * build(deps): bump github.com/hashicorp/go-multierror from 1.1.0 to 1.1.1
+ * Lookup Containerfile if user specifies a directory
+ * Add Tag format placeholder to docs
+ * copier: ignore sockets
+ * image: propagate errors from extractRootfs
+ * Remove system test of 'buildah containers -a'
+ * Clarify userns options are usable only as root in man pages
+ * Fix system test of 'containers -a'
+ * Remove duplicated code in addcopy
+ * build(deps): bump github.com/onsi/ginkgo from 1.15.0 to 1.15.1
+ * build(deps): bump github.com/onsi/gomega from 1.10.5 to 1.11.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.1 to 1.7.2
+ * Update multi-arch buildah build setup with new logic
+ * Update nix pin with `make nixpkgs`
+ * overlay.bats: fix the "overlay source permissions" test
+ * imagebuildah: use overlay for volumes when using overlay
+ * Make PolicyMap and PullPolicy names align
+ * copier: add GetOptions.IgnoreUnreadable
+ * Check local image to match system context
+ * fix: Containerfiles - smaller set of userns u/gids
+ * Set upperdir permissions based on source
+ * Shrink the vendoring size of pkc/cli
+ * Clarify image name match failure message
+ * ADD/COPY: create the destination directory first, chroot to it
+ * copier.GetOptions: add NoDerefSymLinks
+ * copier: add an Eval function
+ * Update system test for 'from --cap-add/drop'
+ * copier: fix a renaming bug
+ * copier: return child process stderr if we can't JSON decode the response
+ * Add some system tests
+ * build(deps): bump github.com/containers/storage from 1.26.0 to 1.27.0
+ * complement add/copy --chmod documentation
+ * buildah login and logout, do not need to enter user namespace
+ * Add multi-arch image build
+ * chmod/chown added/fixed in bash completions
+ * OWNERS: add @lsm5
+ * buildah add/copy --chmod dockerfile implementation
+ * bump github.com/openshift/imagebuilder from 1.1.8 to 1.2.0
+ * buildah add/copy --chmod cli implementation for files and urls
+ * Make sure we set the buildah version label
+ * Isolation strings, should match user input
+ * [CI:DOCS] buildah-from.md: remove dup arch,os
+ * build(deps): bump github.com/containers/image/v5 from 5.10.2 to 5.10.3
+ * Cirrus: Temp. disable prior-fedora (F32) testing
+ * pr-should-include-tests: recognized "renamed" tests
+ * build(deps): bump github.com/sirupsen/logrus from 1.7.0 to 1.8.0
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.0 to 1.7.1
+ * build(deps): bump github.com/containers/common from 0.34.2 to 0.35.0
+ * Fix reaping of stages with no instructions
+ * add stale bot
+ * Add base image name to comment
+ * build(deps): bump github.com/spf13/cobra from 1.1.1 to 1.1.3
+ * Don't fail copy to emptydir
+ * buildah: use volatile containers
+ * vendor: update containers/storage
+ * Eliminate the use of containers/building import in pkg subdirs
+ * Add more support for removing config
+ * Improve messages about --cache-from not being supported
+ * Revert patch to allow COPY/ADD of empty dirs.
+ * Don't fail copy to emptydir
+ * Fix tutorial for rootless mode
+ * Fix caching layers with build args
+ * Vendor in containers/image v5.10.2
+ * build(deps): bump github.com/containers/common from 0.34.0 to 0.34.2
+ * build(deps): bump github.com/onsi/ginkgo from 1.14.2 to 1.15.0
+ * 'make validate': require PRs to include tests
+ * build(deps): bump github.com/onsi/gomega from 1.10.4 to 1.10.5
+ * build(deps): bump github.com/containers/storage from 1.24.5 to 1.25.0
+ * Use chown function for U volume flag from containers/common repository
+ * --iidfile: print hash prefix
+ * bump containernetworking/cni to v0.8.1 - fix for CVE-2021-20206
+ * run: fix check for host pid namespace
+ * Finish plumbing for buildah bud --manifest
+ * buildah manifest add localimage should work
+ * Stop testing directory permissions with latest docker
+ * Fix build arg check
+ * build(deps): bump github.com/containers/ocicrypt from 1.0.3 to 1.1.0
+ * [ci:docs] Fix man page for buildah push
+ * Update nix pin with `make nixpkgs`
+ * Bump to containers/image v5.10.1
+ * Rebuild layer if a change in ARG is detected
+ * Bump golang.org/x/crypto to the latest
+ * Add Ashley and Urvashi to Approvers
+ * local image lookup by digest
+ * Use build-arg ENV val from local environment if set
+ * Pick default OCI Runtime from containers.conf
+ * Added required devel packages
+ * Cirrus: Native OSX Build
+ * Cirrus: Two minor cleanup items
+ * Workaround for RHEL gating test failure
+ * build(deps): bump github.com/stretchr/testify from 1.6.1 to 1.7.0
+ * build(deps): bump github.com/mattn/go-shellwords from 1.0.10 to 1.0.11
+ * Reset upstream branch to dev version
+ * If destination does not exists, do not throw error
+
## v1.19.0 (2021-01-08)
Update vendor of containers/storage and containers/common
Buildah inspect should be able to inspect manifests
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 245f1ad50..2fa5020ee 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -107,6 +107,7 @@ validate: install.tools
./tests/validate/git-validation.sh
./hack/xref-helpmsgs-manpages
./tests/validate/pr-should-include-tests
+ ./tests/validate/buildahimages-are-sane
.PHONY: install.tools
install.tools:
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 3bde05a3e..6755535b1 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,93 @@
+- Changelog for v1.22.0 (2021-08-02)
+ * c/image, c/storage, c/common vendor before Podman 3.3 release
+ * WIP: tests: new assert()
+ * Proposed patch for 3399 (shadowutils)
+ * Fix handling of --restore shadow-utils
+ * build(deps): bump github.com/containers/image/v5 from 5.13.2 to 5.14.0
+ * runtime-flag (debug) test: handle old & new runc
+ * build(deps): bump github.com/containers/storage from 1.32.6 to 1.33.0
+ * Allow dst and destination for target in secret mounts
+ * Multi-arch: Always push updated version-tagged img
+ * Add a few tests on cgroups V2
+ * imagebuildah.stageExecutor.prepare(): remove pseudonym check
+ * refine dangling filter
+ * Chown with environment variables not set should fail
+ * Just restore protections of shadow-utils
+ * build(deps): bump github.com/opencontainers/runc from 1.0.0 to 1.0.1
+ * Remove specific kernel version number requirement from install.md
+ * Multi-arch image workflow: Make steps generic
+ * chroot: fix environment value leakage to intermediate processes
+ * Update nix pin with `make nixpkgs`
+ * buildah source - create and manage source images
+ * Update cirrus-cron notification GH workflow
+ * Reuse code from containers/common/pkg/parse
+ * Cirrus: Freshen VM images
+ * build(deps): bump github.com/containers/storage from 1.32.5 to 1.32.6
+ * Fix excludes exception beginning with / or ./
+ * Fix syntax for --manifest example
+ * build(deps): bump github.com/onsi/gomega from 1.13.0 to 1.14.0
+ * vendor containers/common@main
+ * Cirrus: Drop dependence on fedora-minimal
+ * Adjust conformance-test error-message regex
+ * Workaround appearance of differing debug messages
+ * Cirrus: Install docker from package cache
+ * build(deps): bump github.com/containers/ocicrypt from 1.1.1 to 1.1.2
+ * Switch rusagelogfile to use options.Out
+ * build(deps): bump github.com/containers/storage from 1.32.4 to 1.32.5
+ * Turn stdio back to blocking when command finishes
+ * Add support for default network creation
+ * Cirrus: Updates for master->main rename
+ * Change references from master to main
+ * Add `--env` and `--workingdir` flags to run command
+ * build(deps): bump github.com/opencontainers/runc
+ * [CI:DOCS] buildah bud: spelling --ignore-file requires parameter
+ * [CI:DOCS] push/pull: clarify supported transports
+ * Remove unused function arguments
+ * Create mountOptions for mount command flags
+ * Extract version command implementation to function
+ * Add --json flags to `mount` and `version` commands
+ * build(deps): bump github.com/containers/storage from 1.32.2 to 1.32.3
+ * build(deps): bump github.com/containers/common from 0.40.0 to 0.40.1
+ * copier.Put(): set xattrs after ownership
+ * buildah add/copy: spelling
+ * build(deps): bump github.com/containers/common from 0.39.0 to 0.40.0
+ * buildah copy and buildah add should support .containerignore
+ * Remove unused util.StartsWithValidTransport
+ * Fix documentation of the --format option of buildah push
+ * Don't use alltransports.ParseImageName with known transports
+ * build(deps): bump github.com/containers/image/v5 from 5.13.0 to 5.13.1
+ * man pages: clarify `rmi` removes dangling parents
+ * tests: make it easier to override the location of the copy helper
+ * build(deps): bump github.com/containers/image/v5 from 5.12.0 to 5.13.0
+ * [CI:DOCS] Fix links to c/image master branch
+ * imagebuildah: use the specified logger for logging preprocessing warnings
+ * Fix copy into workdir for a single file
+ * Fix docs links due to branch rename
+ * Update nix pin with `make nixpkgs`
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.2 to 1.7.3
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.1 to 1.8.2
+ * build(deps): bump go.etcd.io/bbolt from 1.3.5 to 1.3.6
+ * build(deps): bump github.com/containers/storage from 1.32.1 to 1.32.2
+ * build(deps): bump github.com/mattn/go-shellwords from 1.0.11 to 1.0.12
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.3 to 1.16.4
+ * fix(docs): typo
+ * Move to v1.22.0-dev
+ * Fix handling of auth.json file while in a user namespace
+ * Add rusage-logfile flag to optionally send rusage to a file
+ * imagebuildah: redo step logging
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.2 to 1.16.3
+ * build(deps): bump github.com/containers/storage from 1.32.0 to 1.32.1
+ * Add volumes to make running buildah within a container easier
+ * build(deps): bump github.com/onsi/gomega from 1.12.0 to 1.13.0
+ * Add and use a "copy" helper instead of podman load/save
+ * Bump github.com/containers/common from 0.38.4 to 0.39.0
+ * containerImageRef/containerImageSource: don't buffer uncompressed layers
+ * containerImageRef(): squashed images have no parent images
+ * Sync. workflow across skopeo, buildah, and podman
+ * Bump github.com/containers/storage from 1.31.1 to 1.31.2
+ * Bump github.com/opencontainers/runc from 1.0.0-rc94 to 1.0.0-rc95
+ * Bump to v1.21.1-dev [NO TESTS NEEDED]
+
- Changelog for v1.21.0 (2021-05-19)
* Don't blow up if cpp detects errors
* Vendor in containers/common v0.38.4
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 7b6085339..27f536a89 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.22.0-dev"
+ Version = "1.22.0"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 68721b73c..a8e3e96a3 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,10 +4,10 @@ go 1.12
require (
github.com/containernetworking/cni v0.8.1
- github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf
- github.com/containers/image/v5 v5.13.2
+ github.com/containers/common v0.42.1
+ github.com/containers/image/v5 v5.15.0
github.com/containers/ocicrypt v1.1.2
- github.com/containers/storage v1.32.6
+ github.com/containers/storage v1.33.1
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
@@ -36,7 +36,7 @@ require (
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
k8s.io/klog v1.0.0 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index ce7eb6c74..055b8a386 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -73,7 +73,6 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.17/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.20 h1:ZTwcx3NS8n07kPf/JZ1qwU6vnjhVPMUWlXBF8r9UxrE=
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
@@ -135,7 +134,6 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -225,10 +223,11 @@ github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf h1:z0ciG0ByyJG3WCBpLYd2XLThCC7UBaH7GeSfXY4sAqc=
-github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf/go.mod h1:Ba5YVNCnyX6xDtg1JqEHa2EMVMW5UbHmIyEqsEwpeGE=
-github.com/containers/image/v5 v5.13.2 h1:AgYunV/9d2fRkrmo23wH2MkqeHolFd6oQCkK+1PpuFA=
-github.com/containers/image/v5 v5.13.2/go.mod h1:GkWursKDlDcUIT7L7vZf70tADvZCk/Ga0wgS0MuF0ag=
+github.com/containers/common v0.42.1 h1:ADOZrVAS8ZY5hBAvr/GoRoPv5Z7TBkxWgxQEXQjlqac=
+github.com/containers/common v0.42.1/go.mod h1:AaF3ipZfgezsctDuhzLkq4Vl+LkEy7J74ikh2HSXDsg=
+github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
+github.com/containers/image/v5 v5.15.0 h1:NduhN20ptHNlf0uRny5iTJa2OodB9SLMEB4hKKbzBBs=
+github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -236,9 +235,10 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/storage v1.32.2/go.mod h1:YIBxxjfXZTi04Ah49sh1uSGfmT1V89+I5i3deRobzQo=
-github.com/containers/storage v1.32.6 h1:NqdFRewXO/PYPjgCAScoigZc5QUA21yapSEj6kqD8cw=
github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
+github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
+github.com/containers/storage v1.33.1 h1:RHUPZ7vQxwoeOoMoKUDsVun4f9Wi8BTXmr/wQiruBYU=
+github.com/containers/storage v1.33.1/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -251,7 +251,6 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -515,7 +514,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
@@ -636,7 +634,6 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
@@ -780,8 +777,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v7 v7.0.2 h1:eN6AD/ytv1nqCO7Dm8MO0/pGMKmMyH/WMnTJhAUuc/w=
-github.com/vbauerster/mpb/v7 v7.0.2/go.mod h1:Mnq3gESXJ9eQhccbGZDggJ1faTCrmaA4iN57fUloRGE=
+github.com/vbauerster/mpb/v7 v7.0.3 h1:NfX0pHWhlDTev15M/C3qmSTM1EiIjcS+/d6qS6H4FnI=
+github.com/vbauerster/mpb/v7 v7.0.3/go.mod h1:NXGsfPGx6G2JssqvEcULtDqUrxuuYs4llpv8W6ZUpzk=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
@@ -1044,8 +1041,8 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 4a4352559..81af8ee65 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -2334,7 +2334,7 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin
switch kv[0] {
case "id":
id = kv[1]
- case "target":
+ case "target", "dst", "destination":
target = kv[1]
case "required":
required, err = strconv.ParseBool(kv[1])
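
The hunk above lets secret mounts accept "dst" and "destination" as aliases for "target" when the comma-separated --mount type=secret options are parsed. As a rough, self-contained sketch of the key=value parsing involved (the parseSecretTokens helper and its option struct are illustrative only, not buildah's actual API):

    // Illustrative sketch; parseSecretTokens is not buildah's actual API.
    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // secretOpts holds the fields a secret-mount option string can set.
    type secretOpts struct {
        ID       string
        Target   string
        Required bool
    }

    // parseSecretTokens parses tokens such as "id=mysecret" or
    // "dst=/run/secrets/mysecret", treating dst/destination as aliases of target.
    func parseSecretTokens(tokens []string) (secretOpts, error) {
        var o secretOpts
        for _, tok := range tokens {
            kv := strings.SplitN(tok, "=", 2)
            if len(kv) != 2 {
                return o, fmt.Errorf("invalid secret option %q", tok)
            }
            switch kv[0] {
            case "id":
                o.ID = kv[1]
            case "target", "dst", "destination":
                o.Target = kv[1]
            case "required":
                b, err := strconv.ParseBool(kv[1])
                if err != nil {
                    return o, fmt.Errorf("parsing required: %w", err)
                }
                o.Required = b
            }
        }
        return o, nil
    }

    func main() {
        o, err := parseSecretTokens([]string{"id=mysecret", "dst=/run/secrets/mysecret", "required=true"})
        fmt.Printf("%+v %v\n", o, err)
    }
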
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 97347178a..8712a13fd 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -61,7 +61,10 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
// Check whether `name` points to a transport. If so, we
// return the error. Otherwise we assume that `name` refers to
// an image on a registry (e.g., "fedora").
- if alltransports.TransportFromImageName(name) != nil {
+ //
+ // NOTE: the `docker` transport is an exception to support a
+ // `pull docker:latest` which would otherwise return an error.
+ if t := alltransports.TransportFromImageName(name); t != nil && t.Name() != registryTransport.Transport.Name() {
return nil, err
}
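
The reworked check above only rejects a name that failed alltransports.ParseImageName when it carries an explicit non-docker transport prefix, so "pull docker:latest" now resolves to the registry image named "docker" with tag "latest" instead of returning an error. A rough standalone sketch of that decision, using the containers/image packages (the classify helper is illustrative only):

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/docker"
        "github.com/containers/image/v5/transports/alltransports"
    )

    // classify mimics the decision above: a name that does not parse as a full
    // transport reference is rejected only when it names a non-docker transport;
    // otherwise it is assumed to be an image on a registry.
    func classify(name string) string {
        if _, err := alltransports.ParseImageName(name); err == nil {
            return "explicit transport reference"
        }
        // The docker transport is exempted so that "docker:latest" is read
        // as the image "docker" with tag "latest".
        if t := alltransports.TransportFromImageName(name); t != nil && t.Name() != docker.Transport.Name() {
            return "rejected: malformed transport reference"
        }
        return "treated as a registry image name"
    }

    func main() {
        fmt.Println(classify("docker://fedora")) // explicit transport reference
        fmt.Println(classify("docker:latest"))   // treated as a registry image name
        fmt.Println(classify("fedora"))          // treated as a registry image name
    }
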
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index f429b96ed..a83aa9407 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -20,18 +20,18 @@
# "key = value"
# If it is empty or commented out, no annotations will be added
#
-# annotations = []
+#annotations = []
# Used to change the name of the default AppArmor profile of container engine.
#
-# apparmor_profile = "container-default"
+#apparmor_profile = "container-default"
# Default way to create a cgroup namespace for the container
# Options are:
# `private` Create private Cgroup Namespace for the container.
# `host` Share host Cgroup Namespace with the container.
#
-# cgroupns = "private"
+#cgroupns = "private"
# Control container cgroup configuration
# Determines whether the container will create CGroups.
@@ -40,23 +40,23 @@
# `disabled` Disable cgroup support, will inherit cgroups from parent
# `no-conmon` Do not create a cgroup dedicated to conmon.
#
-# cgroups = "enabled"
+#cgroups = "enabled"
# List of default capabilities for containers. If it is empty or commented out,
# the default capabilities defined in the container engine will be added.
#
default_capabilities = [
- "CHOWN",
- "DAC_OVERRIDE",
- "FOWNER",
- "FSETID",
- "KILL",
- "NET_BIND_SERVICE",
- "SETFCAP",
- "SETGID",
- "SETPCAP",
- "SETUID",
- "SYS_CHROOT"
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FOWNER",
+ "FSETID",
+ "KILL",
+ "NET_BIND_SERVICE",
+ "SETFCAP",
+ "SETGID",
+ "SETPCAP",
+ "SETUID",
+ "SYS_CHROOT"
]
# A list of sysctls to be set in containers by default,
@@ -64,7 +64,7 @@ default_capabilities = [
# for example:"net.ipv4.ping_group_range = 0 0".
#
default_sysctls = [
- "net.ipv4.ping_group_range=0 0",
+ "net.ipv4.ping_group_range=0 0",
]
# A list of ulimits to be set in containers by default, specified as
@@ -75,24 +75,24 @@ default_sysctls = [
# container engine.
# Ulimits has limits for non-privileged container engines.
#
-# default_ulimits = [
+#default_ulimits = [
# "nofile=1280:2560",
-# ]
+#]
# List of devices. Specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example:
# "/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the default devices will be used
#
-# devices = []
+#devices = []
# List of default DNS options to be added to /etc/resolv.conf inside of the container.
#
-# dns_options = []
+#dns_options = []
# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
#
-# dns_searches = []
+#dns_searches = []
# Set default DNS servers.
# This option can be used to override the DNS configuration passed to the
@@ -100,19 +100,19 @@ default_sysctls = [
# /etc/resolv.conf in the container.
# The /etc/resolv.conf file in the image will be used without changes.
#
-# dns_servers = []
+#dns_servers = []
# Environment variable list for the conmon process; used for passing necessary
# environment variables to conmon or the runtime.
#
-# env = [
-# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-# "TERM=xterm",
-# ]
+#env = [
+# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+# "TERM=xterm",
+#]
# Pass all host environment variables into the container.
#
-# env_host = false
+#env_host = false
# Default proxy environment variables passed into the container.
# The environment variables passed in include:
@@ -121,49 +121,50 @@ default_sysctls = [
# should not use proxy. Proxy environment variables specified for the container
# in any other way will override the values passed from the host.
#
-# http_proxy = true
+#http_proxy = true
# Run an init inside the container that forwards signals and reaps processes.
#
-# init = false
+#init = false
-# Container init binary, if init=true, this is the init binary to be used for containers.
+# Container init binary, if init=true, this is the init binary to be used for containers.
#
-# init_path = "/usr/libexec/podman/catatonit"
+#init_path = "/usr/libexec/podman/catatonit"
# Default way to create an IPC namespace (POSIX SysV IPC) for the container
# Options are:
# `private` Create private IPC Namespace for the container.
# `host` Share host IPC Namespace with the container.
#
-# ipcns = "private"
+#ipcns = "private"
# keyring tells the container engine whether to create
# a kernel keyring for use within the container.
-# keyring = true
+#
+#keyring = true
# label tells the container engine whether to use container separation using
# MAC(SELinux) labeling or not.
# The label flag is ignored on label disabled systems.
#
-# label = true
+#label = true
# Logging driver for the container. Available options: k8s-file and journald.
#
-# log_driver = "k8s-file"
+#log_driver = "k8s-file"
# Maximum size allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If positive, it must be >= 8192 to match or
# exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded.
#
-# log_size_max = -1
+#log_size_max = -1
# Specifies default format tag for container log messages.
# This is useful for creating a specific tag for container log messages.
# Container logs default to the truncated container ID as a tag.
#
-# log_tag = ""
+#log_tag = ""
# Default way to create a Network namespace for the container
# Options are:
@@ -171,143 +172,147 @@ default_sysctls = [
# `host` Share host Network Namespace with the container.
# `none` Containers do not use the network
#
-# netns = "private"
+#netns = "private"
# Create /etc/hosts for the container. By default, the container engine manages
# /etc/hosts, automatically adding the container's own IP address.
#
-# no_hosts = false
+#no_hosts = false
# Default way to create a PID namespace for the container
# Options are:
# `private` Create private PID Namespace for the container.
# `host` Share host PID Namespace with the container.
#
-# pidns = "private"
+#pidns = "private"
# Maximum number of processes allowed in a container.
#
-# pids_limit = 2048
+#pids_limit = 2048
# Copy the content from the underlying image into the newly created volume
# when the container is created instead of when it is started. If false,
# the container engine will not copy the content until the container is started.
# Setting it to true may have negative performance implications.
#
-# prepare_volume_on_create = false
+#prepare_volume_on_create = false
# Indicates the networking to be used for rootless containers
-# rootless_networking = "slirp4netns"
+#
+#rootless_networking = "slirp4netns"
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
#
-# seccomp_profile = "/usr/share/containers/seccomp.json"
+#seccomp_profile = "/usr/share/containers/seccomp.json"
# Size of /dev/shm. Specified as <number><unit>.
# Unit is optional, values:
# b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
# If the unit is omitted, the system uses bytes.
#
-# shm_size = "65536k"
+#shm_size = "65536k"
# Set timezone in container. Takes IANA timezones as well as "local",
# which sets the timezone in the container to match the host machine.
#
-# tz = ""
+#tz = ""
# Set umask inside the container
#
-# umask = "0022"
+#umask = "0022"
# Default way to create a User namespace for the container
# Options are:
# `auto` Create unique User Namespace for the container.
# `host` Share host User Namespace with the container.
#
-# userns = "host"
+#userns = "host"
# Number of UIDs to allocate for the automatic container creation.
# UIDs are allocated from the "container" UIDs listed in
# /etc/subuid & /etc/subgid
#
-# userns_size = 65536
+#userns_size = 65536
# Default way to create a UTS namespace for the container
# Options are:
# `private` Create private UTS Namespace for the container.
# `host` Share host UTS Namespace with the container.
#
-# utsns = "private"
+#utsns = "private"
# List of volumes. Specified as
# "<directory-on-host>:<directory-in-container>:<options>", for example:
# "/db:/var/lib/db:ro".
# If it is empty or commented out, no volumes will be added
#
-# volumes = []
+#volumes = []
# The network table contains settings pertaining to the management of
# CNI plugins.
[secrets]
-# driver = "file"
+#driver = "file"
[secrets.opts]
-# root = "/example/directory"
+#root = "/example/directory"
[network]
# Path to directory where CNI plugin binaries are located.
#
-# cni_plugin_dirs = ["/usr/libexec/cni"]
+#cni_plugin_dirs = ["/usr/libexec/cni"]
# The network name of the default CNI network to attach pods to.
-# default_network = "podman"
+#
+#default_network = "podman"
# The default subnet for the default CNI network given in default_network.
# If a network with that name does not exist, a new network using that name and
# this subnet will be created.
# Must be a valid IPv4 CIDR prefix.
+#
#default_subnet = "10.88.0.0/16"
# Path to the directory where CNI configuration files are located.
#
-# network_config_dir = "/etc/cni/net.d/"
+#network_config_dir = "/etc/cni/net.d/"
[engine]
# Index to the active service
-# active_service = production
+#
+#active_service = production
# Cgroup management implementation used for the runtime.
# Valid options "systemd" or "cgroupfs"
#
-# cgroup_manager = "systemd"
+#cgroup_manager = "systemd"
# Environment variables to pass into conmon
#
-# conmon_env_vars = [
-# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-# ]
+#conmon_env_vars = [
+# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+#]
# Paths to look for the conmon container manager binary
#
-# conmon_path = [
-# "/usr/libexec/podman/conmon",
-# "/usr/local/libexec/podman/conmon",
-# "/usr/local/lib/podman/conmon",
-# "/usr/bin/conmon",
-# "/usr/sbin/conmon",
-# "/usr/local/bin/conmon",
-# "/usr/local/sbin/conmon"
-# ]
+#conmon_path = [
+# "/usr/libexec/podman/conmon",
+# "/usr/local/libexec/podman/conmon",
+# "/usr/local/lib/podman/conmon",
+# "/usr/bin/conmon",
+# "/usr/sbin/conmon",
+# "/usr/local/bin/conmon",
+# "/usr/local/sbin/conmon"
+#]
# Specify the keys sequence used to detach a container.
# Format is a single character [a-Z] or a comma separated sequence of
# `ctrl-<value>`, where `<value>` is one of:
# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
#
-# detach_keys = "ctrl-p,ctrl-q"
+#detach_keys = "ctrl-p,ctrl-q"
# Determines whether engine will reserve ports on the host when they are
# forwarded to containers. When enabled, when ports are forwarded to containers,
@@ -316,48 +321,51 @@ default_sysctls = [
# significant memory usage if a container has many ports forwarded to it.
# Disabling this can save memory.
#
-# enable_port_reservation = true
+#enable_port_reservation = true
# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
# For example "http_proxy=internal.proxy.company.com".
# Note these environment variables will not be used within the container.
# Set the env section under [containers] table, if you want to set environment variables for the container.
-# env = []
+#
+#env = []
# Selects which logging mechanism to use for container engine events.
# Valid values are `journald`, `file` and `none`.
#
-# events_logger = "journald"
+#events_logger = "journald"
# Path to OCI hooks directories for automatically executed hooks.
#
-# hooks_dir = [
-# "/usr/share/containers/oci/hooks.d",
-# ]
+#hooks_dir = [
+# "/usr/share/containers/oci/hooks.d",
+#]
# Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building
# container images. By default, images pulled and pushed match the format of the
# source image. Building/committing defaults to OCI.
-# image_default_format = ""
+#
+#image_default_format = ""
# Default transport method for pulling and pushing images
#
-# image_default_transport = "docker://"
+#image_default_transport = "docker://"
# Maximum number of image layers to be copied (pulled/pushed) simultaneously.
# Not setting this field, or setting it to zero, will fall back to containers/image defaults.
-# image_parallel_copies = 0
+#
+#image_parallel_copies = 0
# Default command to run the infra container
#
-# infra_command = "/pause"
+#infra_command = "/pause"
# Infra (pause) container image name for pod infra containers. When running a
# pod, we start a `pause` process in a container to hold open the namespaces
# associated with the pod. This container does nothing other than sleep,
# reserving the pod's resources for the lifetime of the pod.
#
-# infra_image = "k8s.gcr.io/pause:3.4.1"
+#infra_image = "k8s.gcr.io/pause:3.4.1"
# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing, in general
@@ -365,18 +373,19 @@ default_sysctls = [
# faster "shm" lock type. You may need to run "podman system renumber" after
# you change the lock type.
#
-# lock_type** = "shm"
+#lock_type** = "shm"
# Indicates if Podman is running inside a VM via Podman Machine.
# Podman uses this value to do extra setup around networking from the
# container inside the VM to the host.
-# machine_enabled = false
+#
+#machine_enabled = false
# MultiImageArchive - if true, the container engine allows for storing archives
# (e.g., of the docker-archive transport) with multiple images. By default,
# Podman creates single-image archives.
#
-# multi_image_archive = "false"
+#multi_image_archive = "false"
# Default engine namespace
# If engine is joined to a namespace, it will see only containers and pods
@@ -385,131 +394,136 @@ default_sysctls = [
# The default namespace is "", which corresponds to no namespace. When no
# namespace is set, all containers and pods are visible.
#
-# namespace = ""
+#namespace = ""
# Path to the slirp4netns binary
#
-# network_cmd_path = ""
+#network_cmd_path = ""
# Default options to pass to the slirp4netns binary.
# For example "allow_host_loopback=true"
#
-# network_cmd_options = []
+#network_cmd_options = []
# Whether to use chroot instead of pivot_root in the runtime
#
-# no_pivot_root = false
+#no_pivot_root = false
# Number of locks available for containers and pods.
# If this is changed, a lock renumber must be performed (e.g. with the
# 'podman system renumber' command).
#
-# num_locks = 2048
+#num_locks = 2048
# Whether to pull new image before running a container
-# pull_policy = "missing"
+#
+#pull_policy = "missing"
# Indicates whether the application should be running in remote mode. This flag modifies the
# --remote option on container engines. Setting the flag to true will default
# `podman --remote=true` for access to the remote Podman service.
-# remote = false
+#
+#remote = false
# Default OCI runtime
#
-# runtime = "crun"
+#runtime = "crun"
# List of the OCI runtimes that support --format=json. When json is supported,
# the engine will use it for reporting nicer errors.
#
-# runtime_supports_json = ["crun", "runc", "kata", "runsc"]
+#runtime_supports_json = ["crun", "runc", "kata", "runsc"]
# List of the OCI runtimes that support running containers with KVM Separation.
#
-# runtime_supports_kvm = ["kata"]
+#runtime_supports_kvm = ["kata"]
# List of the OCI runtimes that support running containers without cgroups.
#
-# runtime_supports_nocgroups = ["crun"]
+#runtime_supports_nocgroups = ["crun"]
# Directory for persistent engine files (database, etc)
# By default, this will be configured relative to where the containers/storage
# stores containers
# Uncomment to change location from this default
#
-# static_dir = "/var/lib/containers/storage/libpod"
+#static_dir = "/var/lib/containers/storage/libpod"
# Number of seconds to wait for container to exit before sending kill signal.
-# stop_timeout = 10
+#
+#stop_timeout = 10
# map of service destinations
-# [service_destinations]
-# [service_destinations.production]
+#
+#[service_destinations]
+# [service_destinations.production]
# URI to access the Podman service
# Examples:
# rootless "unix://run/user/$UID/podman/podman.sock" (Default)
# rootfull "unix://run/podman/podman.sock" (Default)
# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock
-# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
-# Path to file containing ssh identity key
-# identity = "~/.ssh/id_rsa"
+#
+# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
+# Path to file containing ssh identity key
+# identity = "~/.ssh/id_rsa"
# Directory for temporary files. Must be tmpfs (wiped after reboot)
#
-# tmp_dir = "/run/libpod"
+#tmp_dir = "/run/libpod"
# Directory for libpod named volumes.
# By default, this will be configured relative to where containers/storage
# stores containers.
# Uncomment to change location from this default.
#
-# volume_path = "/var/lib/containers/storage/volumes"
+#volume_path = "/var/lib/containers/storage/volumes"
# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, etc)
[engine.runtimes]
-# crun = [
-# "/usr/bin/crun",
-# "/usr/sbin/crun",
-# "/usr/local/bin/crun",
-# "/usr/local/sbin/crun",
-# "/sbin/crun",
-# "/bin/crun",
-# "/run/current-system/sw/bin/crun",
-# ]
-
-# kata = [
-# "/usr/bin/kata-runtime",
-# "/usr/sbin/kata-runtime",
-# "/usr/local/bin/kata-runtime",
-# "/usr/local/sbin/kata-runtime",
-# "/sbin/kata-runtime",
-# "/bin/kata-runtime",
-# "/usr/bin/kata-qemu",
-# "/usr/bin/kata-fc",
-# ]
-
-# runc = [
-# "/usr/bin/runc",
-# "/usr/sbin/runc",
-# "/usr/local/bin/runc",
-# "/usr/local/sbin/runc",
-# "/sbin/runc",
-# "/bin/runc",
-# "/usr/lib/cri-o-runc/sbin/runc",
-# ]
-
-# runsc = [
-# "/usr/bin/runsc",
-# "/usr/sbin/runsc",
-# "/usr/local/bin/runsc",
-# "/usr/local/sbin/runsc",
-# "/bin/runsc",
-# "/sbin/runsc",
-# "/run/current-system/sw/bin/runsc",
-# ]
+#crun = [
+# "/usr/bin/crun",
+# "/usr/sbin/crun",
+# "/usr/local/bin/crun",
+# "/usr/local/sbin/crun",
+# "/sbin/crun",
+# "/bin/crun",
+# "/run/current-system/sw/bin/crun",
+#]
+
+#kata = [
+# "/usr/bin/kata-runtime",
+# "/usr/sbin/kata-runtime",
+# "/usr/local/bin/kata-runtime",
+# "/usr/local/sbin/kata-runtime",
+# "/sbin/kata-runtime",
+# "/bin/kata-runtime",
+# "/usr/bin/kata-qemu",
+# "/usr/bin/kata-fc",
+#]
+
+#runc = [
+# "/usr/bin/runc",
+# "/usr/sbin/runc",
+# "/usr/local/bin/runc",
+# "/usr/local/sbin/runc",
+# "/sbin/runc",
+# "/bin/runc",
+# "/usr/lib/cri-o-runc/sbin/runc",
+#]
+
+#runsc = [
+# "/usr/bin/runsc",
+# "/usr/sbin/runsc",
+# "/usr/local/bin/runsc",
+# "/usr/local/sbin/runsc",
+# "/bin/runsc",
+# "/sbin/runsc",
+# "/run/current-system/sw/bin/runsc",
+#]
[engine.volume_plugins]
-# testplugin = "/run/podman/plugins/test.sock"
+#testplugin = "/run/podman/plugins/test.sock"
# The [engine.volume_plugins] table MUST be the last entry in this file.
# (Unless another table is added)
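
Every option in the containers.conf above is shown as a commented-out default (the #option = value form this change standardizes on). To override a default, the corresponding key is set, uncommented, in a local configuration file; a minimal, illustrative override (paths and values below are examples only, not recommendations) might look like:

    # Illustrative override, e.g. /etc/containers/containers.conf or, for
    # rootless users, ~/.config/containers/containers.conf.
    [containers]
    shm_size = "128m"
    tz = "local"
    log_size_max = 10485760   # positive values must be >= 8192

    [engine]
    cgroup_manager = "systemd"
    events_logger = "journald"
    runtime = "crun"

    [engine.runtimes]
    crun = ["/usr/bin/crun", "/usr/local/bin/crun"]
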
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 47dca527c..1e9e48f33 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.41.1-dev"
+const Version = "0.42.1"