-rw-r--r--  cmd/podman/shared/create.go  3
-rw-r--r--  docs/source/markdown/libpod.conf.5.md  3
-rw-r--r--  go.mod  4
-rw-r--r--  go.sum  9
-rw-r--r--  libpod/config/config.go  166
-rw-r--r--  libpod/config/config_test.go  16
-rw-r--r--  libpod/config/merge.go  183
-rw-r--r--  libpod/config/merge_test.go  157
-rw-r--r--  pkg/api/handlers/generic/containers_create.go (renamed from pkg/api/handlers/containers_create.go)  34
-rw-r--r--  pkg/api/handlers/generic/images.go  2
-rw-r--r--  pkg/api/handlers/generic/swagger.go  6
-rw-r--r--  pkg/api/handlers/libpod/containers_create.go  29
-rw-r--r--  pkg/api/handlers/libpod/pods.go  4
-rw-r--r--  pkg/api/handlers/types.go  9
-rw-r--r--  pkg/api/handlers/utils/containers.go  25
-rw-r--r--  pkg/api/server/register_containers.go  28
-rw-r--r--  pkg/api/server/register_images.go  4
-rw-r--r--  pkg/api/server/register_pods.go  19
-rw-r--r--  pkg/bindings/containers/create.go  30
-rw-r--r--  pkg/seccomp/seccomp.go  2
-rw-r--r--  pkg/spec/config_linux.go  18
-rw-r--r--  pkg/spec/config_unsupported.go  4
-rw-r--r--  pkg/spec/createconfig.go  5
-rw-r--r--  pkg/spec/parse.go  14
-rw-r--r--  pkg/spec/spec.go  50
-rw-r--r--  pkg/spec/storage.go  4
-rw-r--r--  pkg/specgen/config_linux_cgo.go  62
-rw-r--r--  pkg/specgen/config_linux_nocgo.go  11
-rw-r--r--  pkg/specgen/config_unsupported.go  12
-rw-r--r--  pkg/specgen/create.go  187
-rw-r--r--  pkg/specgen/namespaces.go  467
-rw-r--r--  pkg/specgen/oci.go  260
-rw-r--r--  pkg/specgen/specgen.go  99
-rw-r--r--  pkg/specgen/validate.go  159
-rw-r--r--  vendor/github.com/containers/storage/CODE-OF-CONDUCT.md  3
-rw-r--r--  vendor/github.com/containers/storage/Makefile  11
-rw-r--r--  vendor/github.com/containers/storage/VERSION  2
-rw-r--r--  vendor/github.com/containers/storage/go.mod  4
-rw-r--r--  vendor/github.com/containers/storage/go.sum  4
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go  2
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go  2156
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go  101
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go  25
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go  15
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go  3
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_linux.go  15
-rw-r--r--  vendor/github.com/containers/storage/store.go  7
-rw-r--r--  vendor/github.com/containers/storage/utils.go  52
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go  24
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go  9
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level1.go  5
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level2.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level3.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level4.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level5.go  3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level6.go  3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/stateless.go  67
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_go15.go  29
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_posix.go  7
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_windows.go  7
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go  78
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go  156
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go  218
-rw-r--r--  vendor/github.com/stretchr/testify/assert/forward_assertions.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/require/forward_requirements.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go  210
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go  156
-rw-r--r--  vendor/github.com/stretchr/testify/require/requirements.go  2
-rw-r--r--  vendor/modules.txt  8
69 files changed, 4641 insertions, 846 deletions
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index 99538b3dc..5b244699c 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -701,9 +701,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
Sysctl: sysctl,
}
- if err := secConfig.SetLabelOpts(runtime, pid, ipc); err != nil {
- return nil, err
- }
if err := secConfig.SetSecurityOpts(runtime, c.StringArray("security-opt")); err != nil {
return nil, err
}
diff --git a/docs/source/markdown/libpod.conf.5.md b/docs/source/markdown/libpod.conf.5.md
index c28c80b56..ca45bccf6 100644
--- a/docs/source/markdown/libpod.conf.5.md
+++ b/docs/source/markdown/libpod.conf.5.md
@@ -83,7 +83,8 @@ libpod to manage containers.
containers and pods are visible.
**label**="true|false"
- Indicates whether the containers should use label separation.
+ Indicates whether the containers should use label separation by default.
+ Can be overridden via `--security-opt label=...` on the CLI.
**num_locks**=""
Number of locks available for containers and pods. Each created container or pod consumes one lock.
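
As a quick illustration of the behavior documented above (a hedged sketch: the label key is the one described in this man page, while the image and command are arbitrary), the system-wide default and the per-container override combine like this:

    # libpod.conf: keep label separation enabled by default
    label = true

    # disable labeling for a single container, overriding the default
    podman run --security-opt label=disable fedora id
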
diff --git a/go.mod b/go.mod
index 34967bdd1..4248df975 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
github.com/containers/conmon v2.0.10+incompatible
github.com/containers/image/v5 v5.2.1
github.com/containers/psgo v1.4.0
- github.com/containers/storage v1.15.8
+ github.com/containers/storage v1.16.0
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
@@ -63,7 +63,7 @@ require (
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.4.0
+ github.com/stretchr/testify v1.5.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber/jaeger-client-go v2.22.1+incompatible
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect
diff --git a/go.sum b/go.sum
index 97a5f1e31..58eb00433 100644
--- a/go.sum
+++ b/go.sum
@@ -87,6 +87,8 @@ github.com/containers/storage v1.15.3/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I
github.com/containers/storage v1.15.5/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I4W7U5xQxvF8=
github.com/containers/storage v1.15.8 h1:ef7OfUMTpyq0PIVAhV7qfufEI92gAldk25nItrip+6Q=
github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI=
+github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w=
+github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -260,6 +262,8 @@ github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
+github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -283,6 +287,8 @@ github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
@@ -426,12 +432,15 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
+github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
diff --git a/libpod/config/config.go b/libpod/config/config.go
index 13c128688..c72a0efc7 100644
--- a/libpod/config/config.go
+++ b/libpod/config/config.go
@@ -2,7 +2,7 @@ package config
import (
"bytes"
- "io/ioutil"
+ "fmt"
"os"
"os/exec"
"path/filepath"
@@ -287,18 +287,16 @@ type DBConfig struct {
}
// readConfigFromFile reads the specified config file at `path` and attempts to
-// unmarshal its content into a Config.
-func readConfigFromFile(path string) (*Config, error) {
- var config Config
-
- configBytes, err := ioutil.ReadFile(path)
+// unmarshal its content into a Config. The config param specifies the previous
+// default config. If the TOML file at path only specifies a few fields, the
+// defaults from the config parameter will be used for all other fields.
+func readConfigFromFile(path string, config *Config) (*Config, error) {
+ logrus.Debugf("Reading configuration file %q", path)
+ _, err := toml.DecodeFile(path, config)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("unable to decode configuration %v: %v", path, err)
}
- logrus.Debugf("Reading configuration file %q", path)
- err = toml.Unmarshal(configBytes, &config)
-
// For the sake of backwards compat we need to check if the config fields
// with *Set suffix are set in the config. Note that the storage-related
// fields are NOT set in the config here but in the storage.conf OR directly
@@ -313,7 +311,7 @@ func readConfigFromFile(path string) (*Config, error) {
config.TmpDirSet = true
}
- return &config, err
+ return config, err
}
// Write decodes the config as TOML and writes it to the specified path.
@@ -439,15 +437,11 @@ func probeConmon(conmonBinary string) error {
// with cgroupsv2. Other OCI runtimes are not yet supporting cgroupsv2. This
// might change in the future.
func NewConfig(userConfigPath string) (*Config, error) {
- config := &Config{} // start with an empty config
-
- // First, try to read the user-specified config
- if userConfigPath != "" {
- var err error
- config, err = readConfigFromFile(userConfigPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
- }
+ // Start with the default config and iteratively merge fields in the system
+ // configs.
+ config, err := defaultConfigFromMemory()
+ if err != nil {
+ return nil, err
}
// Now, check if the user can access system configs and merge them if needed.
@@ -456,44 +450,45 @@ func NewConfig(userConfigPath string) (*Config, error) {
return nil, errors.Wrapf(err, "error finding config on system")
}
- migrated := false
for _, path := range configs {
- systemConfig, err := readConfigFromFile(path)
+ config, err = readConfigFromFile(path, config)
if err != nil {
return nil, errors.Wrapf(err, "error reading system config %q", path)
}
- // Handle CGroups v2 configuration migration.
- // Migrate only the first config, and do it before
- // merging.
- if !migrated {
- if err := cgroupV2Check(path, systemConfig); err != nil {
- return nil, errors.Wrapf(err, "error rewriting configuration file %s", userConfigPath)
- }
- migrated = true
- }
- // Merge the it into the config. Any unset field in config will be
- // over-written by the systemConfig.
- if err := config.mergeConfig(systemConfig); err != nil {
- return nil, errors.Wrapf(err, "error merging system config")
- }
- logrus.Debugf("Merged system config %q: %v", path, config)
}
- // Finally, create a default config from memory and forcefully merge it into
- // the config. This way we try to make sure that all fields are properly set
- // and that user AND system config can partially set.
- defaultConfig, err := defaultConfigFromMemory()
- if err != nil {
- return nil, errors.Wrapf(err, "error generating default config from memory")
+ // First, try to read the user-specified config
+ if userConfigPath != "" {
+ var err error
+ config, err = readConfigFromFile(userConfigPath, config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
+ }
}
- // Check if we need to switch to cgroupfs and logger=file on rootless.
- defaultConfig.checkCgroupsAndLogger()
-
- if err := config.mergeConfig(defaultConfig); err != nil {
- return nil, errors.Wrapf(err, "error merging default config from memory")
+ // Since runc does not currently support cgroupV2
+ // Change to default crun on first running of libpod.conf
+ // TODO Once runc has support for cgroups, this function should be removed.
+ if !config.CgroupCheck && rootless.IsRootless() {
+ cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ if cgroupsV2 {
+ path, err := exec.LookPath("crun")
+ if err != nil {
+ // Can't find crun path so do nothing
+ logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err)
+ } else {
+ config.CgroupCheck = true
+ config.OCIRuntime = path
+ }
+ }
}
+ // If we need to, switch to cgroupfs and logger=file on rootless.
+ config.checkCgroupsAndLogger()
+
// Relative paths can cause nasty bugs, because core paths we use could
// shift between runs (or even parts of the program - the OCI runtime
// uses a different working directory than we do, for example.
@@ -532,12 +527,12 @@ func systemConfigs() ([]string, error) {
}
configs := []string{}
- if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
- configs = append(configs, _rootOverrideConfigPath)
- }
if _, err := os.Stat(_rootConfigPath); err == nil {
configs = append(configs, _rootConfigPath)
}
+ if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
+ configs = append(configs, _rootOverrideConfigPath)
+ }
return configs, nil
}
@@ -568,29 +563,56 @@ func (c *Config) checkCgroupsAndLogger() {
}
}
-// Since runc does not currently support cgroupV2
-// Change to default crun on first running of libpod.conf
-// TODO Once runc has support for cgroups, this function should be removed.
-func cgroupV2Check(configPath string, tmpConfig *Config) error {
- if !tmpConfig.CgroupCheck && rootless.IsRootless() {
- logrus.Debugf("Rewriting %s for CGroup v2 upgrade", configPath)
- cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return err
+// MergeDBConfig merges the configuration from the database.
+func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
+
+ if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
+ if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
+ c.StorageConfig.RunRoot != "" {
+ logrus.Debugf("Overriding run root %q with %q from database",
+ c.StorageConfig.RunRoot, dbConfig.StorageTmp)
}
- if cgroupsV2 {
- path, err := exec.LookPath("crun")
- if err != nil {
- logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err)
- // Can't find crun path so do nothing
- return nil
- }
- tmpConfig.CgroupCheck = true
- tmpConfig.OCIRuntime = path
- if err := tmpConfig.Write(configPath); err != nil {
- return err
- }
+ c.StorageConfig.RunRoot = dbConfig.StorageTmp
+ }
+
+ if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
+ if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
+ c.StorageConfig.GraphRoot != "" {
+ logrus.Debugf("Overriding graph root %q with %q from database",
+ c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
+ }
+ c.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ }
+
+ if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
+ if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
+ c.StorageConfig.GraphDriverName != "" {
+ logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
+ c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
+ }
+ c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
+ }
+
+ if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
+ if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
+ logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
+ }
+ c.StaticDir = dbConfig.LibpodRoot
+ }
+
+ if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
+ if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
+ logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
+ }
+ c.TmpDir = dbConfig.LibpodTmp
+ c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
+ }
+
+ if !c.VolumePathSet && dbConfig.VolumePath != "" {
+ if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
+ logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
}
+ c.VolumePath = dbConfig.VolumePath
}
return nil
}
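
With merge.go deleted later in this diff, the field-by-field merge helpers are replaced by decoding each TOML file directly on top of an already-populated Config, so a file only overrides the keys it actually contains. Below is a minimal standalone sketch of that decode-over-defaults behavior using the same BurntSushi toml.DecodeFile call as readConfigFromFile; the demoConfig struct and the temporary file are illustrative only, not part of this change.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"

        "github.com/BurntSushi/toml"
    )

    // demoConfig stands in for libpod's Config: only keys present in the
    // TOML file overwrite fields, everything else keeps its default value.
    type demoConfig struct {
        CgroupManager string `toml:"cgroup_manager"`
        NumLocks      uint32 `toml:"num_locks"`
    }

    func main() {
        // Start from in-memory defaults, then layer a partial config on top.
        cfg := demoConfig{CgroupManager: "systemd", NumLocks: 2048}

        f, err := ioutil.TempFile("", "libpod-demo-*.conf")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        // The file only sets num_locks; cgroup_manager keeps its default.
        if _, err := f.WriteString("num_locks = 4096\n"); err != nil {
            panic(err)
        }
        f.Close()

        if _, err := toml.DecodeFile(f.Name(), &cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg.CgroupManager, cfg.NumLocks) // systemd 4096
    }
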
diff --git a/libpod/config/config_test.go b/libpod/config/config_test.go
index 47c092440..24620ce0e 100644
--- a/libpod/config/config_test.go
+++ b/libpod/config/config_test.go
@@ -11,14 +11,14 @@ import (
func TestEmptyConfig(t *testing.T) {
// Make sure that we can read empty configs
- config, err := readConfigFromFile("testdata/empty.conf")
+ config, err := readConfigFromFile("testdata/empty.conf", &Config{})
assert.NotNil(t, config)
assert.Nil(t, err)
}
func TestDefaultLibpodConf(t *testing.T) {
// Make sure that we can read the default libpod.conf
- config, err := readConfigFromFile("testdata/libpod.conf")
+ config, err := readConfigFromFile("testdata/libpod.conf", &Config{})
assert.NotNil(t, config)
assert.Nil(t, err)
}
@@ -32,13 +32,10 @@ func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
defaultConfig.StateType = define.InvalidStateStore
defaultConfig.StorageConfig = storage.StoreOptions{}
- emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf", defaultConfig)
assert.NotNil(t, emptyConfig)
assert.Nil(t, err)
- err = emptyConfig.mergeConfig(defaultConfig)
- assert.Nil(t, err)
-
equal := reflect.DeepEqual(emptyConfig, defaultConfig)
assert.True(t, equal)
}
@@ -46,19 +43,16 @@ func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
func TestMergeEmptyAndLibpodConfig(t *testing.T) {
// Make sure that when we merge the default config into an empty one that we
// effectively get the default config.
- libpodConfig, err := readConfigFromFile("testdata/libpod.conf")
+ libpodConfig, err := readConfigFromFile("testdata/libpod.conf", &Config{})
assert.NotNil(t, libpodConfig)
assert.Nil(t, err)
libpodConfig.StateType = define.InvalidStateStore
libpodConfig.StorageConfig = storage.StoreOptions{}
- emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf", libpodConfig)
assert.NotNil(t, emptyConfig)
assert.Nil(t, err)
- err = emptyConfig.mergeConfig(libpodConfig)
- assert.Nil(t, err)
-
equal := reflect.DeepEqual(emptyConfig, libpodConfig)
assert.True(t, equal)
}
diff --git a/libpod/config/merge.go b/libpod/config/merge.go
deleted file mode 100644
index 798a63da7..000000000
--- a/libpod/config/merge.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package config
-
-import (
- "path/filepath"
-
- "github.com/containers/libpod/libpod/define"
- "github.com/sirupsen/logrus"
-)
-
-// Merge merges the other config into the current one. Note that a field of the
-// other config is only merged when it's not already set in the current one.
-//
-// Note that the StateType and the StorageConfig will NOT be changed.
-func (c *Config) mergeConfig(other *Config) error {
- // strings
- c.CgroupManager = mergeStrings(c.CgroupManager, other.CgroupManager)
- c.CNIConfigDir = mergeStrings(c.CNIConfigDir, other.CNIConfigDir)
- c.CNIDefaultNetwork = mergeStrings(c.CNIDefaultNetwork, other.CNIDefaultNetwork)
- c.DefaultMountsFile = mergeStrings(c.DefaultMountsFile, other.DefaultMountsFile)
- c.DetachKeys = mergeStrings(c.DetachKeys, other.DetachKeys)
- c.EventsLogFilePath = mergeStrings(c.EventsLogFilePath, other.EventsLogFilePath)
- c.EventsLogger = mergeStrings(c.EventsLogger, other.EventsLogger)
- c.ImageDefaultTransport = mergeStrings(c.ImageDefaultTransport, other.ImageDefaultTransport)
- c.InfraCommand = mergeStrings(c.InfraCommand, other.InfraCommand)
- c.InfraImage = mergeStrings(c.InfraImage, other.InfraImage)
- c.InitPath = mergeStrings(c.InitPath, other.InitPath)
- c.LockType = mergeStrings(c.LockType, other.LockType)
- c.Namespace = mergeStrings(c.Namespace, other.Namespace)
- c.NetworkCmdPath = mergeStrings(c.NetworkCmdPath, other.NetworkCmdPath)
- c.OCIRuntime = mergeStrings(c.OCIRuntime, other.OCIRuntime)
- c.SignaturePolicyPath = mergeStrings(c.SignaturePolicyPath, other.SignaturePolicyPath)
- c.StaticDir = mergeStrings(c.StaticDir, other.StaticDir)
- c.TmpDir = mergeStrings(c.TmpDir, other.TmpDir)
- c.VolumePath = mergeStrings(c.VolumePath, other.VolumePath)
-
- // string map of slices
- c.OCIRuntimes = mergeStringMaps(c.OCIRuntimes, other.OCIRuntimes)
-
- // string slices
- c.CNIPluginDir = mergeStringSlices(c.CNIPluginDir, other.CNIPluginDir)
- c.ConmonEnvVars = mergeStringSlices(c.ConmonEnvVars, other.ConmonEnvVars)
- c.ConmonPath = mergeStringSlices(c.ConmonPath, other.ConmonPath)
- c.HooksDir = mergeStringSlices(c.HooksDir, other.HooksDir)
- c.RuntimePath = mergeStringSlices(c.RuntimePath, other.RuntimePath)
- c.RuntimeSupportsJSON = mergeStringSlices(c.RuntimeSupportsJSON, other.RuntimeSupportsJSON)
- c.RuntimeSupportsNoCgroups = mergeStringSlices(c.RuntimeSupportsNoCgroups, other.RuntimeSupportsNoCgroups)
-
- // int64s
- c.MaxLogSize = mergeInt64s(c.MaxLogSize, other.MaxLogSize)
-
- // uint32s
- c.NumLocks = mergeUint32s(c.NumLocks, other.NumLocks)
-
- // bools
- c.EnableLabeling = mergeBools(c.EnableLabeling, other.EnableLabeling)
- c.EnablePortReservation = mergeBools(c.EnablePortReservation, other.EnablePortReservation)
- c.NoPivotRoot = mergeBools(c.NoPivotRoot, other.NoPivotRoot)
- c.SDNotify = mergeBools(c.SDNotify, other.SDNotify)
-
- // state type
- if c.StateType == define.InvalidStateStore {
- c.StateType = other.StateType
- }
-
- // store options - need to check all fields since some configs might only
- // set it partially
- c.StorageConfig.RunRoot = mergeStrings(c.StorageConfig.RunRoot, other.StorageConfig.RunRoot)
- c.StorageConfig.GraphRoot = mergeStrings(c.StorageConfig.GraphRoot, other.StorageConfig.GraphRoot)
- c.StorageConfig.GraphDriverName = mergeStrings(c.StorageConfig.GraphDriverName, other.StorageConfig.GraphDriverName)
- c.StorageConfig.GraphDriverOptions = mergeStringSlices(c.StorageConfig.GraphDriverOptions, other.StorageConfig.GraphDriverOptions)
- if c.StorageConfig.UIDMap == nil {
- c.StorageConfig.UIDMap = other.StorageConfig.UIDMap
- }
- if c.StorageConfig.GIDMap == nil {
- c.StorageConfig.GIDMap = other.StorageConfig.GIDMap
- }
-
- // backwards compat *Set fields
- c.StorageConfigRunRootSet = mergeBools(c.StorageConfigRunRootSet, other.StorageConfigRunRootSet)
- c.StorageConfigGraphRootSet = mergeBools(c.StorageConfigGraphRootSet, other.StorageConfigGraphRootSet)
- c.StorageConfigGraphDriverNameSet = mergeBools(c.StorageConfigGraphDriverNameSet, other.StorageConfigGraphDriverNameSet)
- c.VolumePathSet = mergeBools(c.VolumePathSet, other.VolumePathSet)
- c.StaticDirSet = mergeBools(c.StaticDirSet, other.StaticDirSet)
- c.TmpDirSet = mergeBools(c.TmpDirSet, other.TmpDirSet)
-
- return nil
-}
-
-// MergeDBConfig merges the configuration from the database.
-func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
-
- if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
- if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
- c.StorageConfig.RunRoot != "" {
- logrus.Debugf("Overriding run root %q with %q from database",
- c.StorageConfig.RunRoot, dbConfig.StorageTmp)
- }
- c.StorageConfig.RunRoot = dbConfig.StorageTmp
- }
-
- if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
- if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
- c.StorageConfig.GraphRoot != "" {
- logrus.Debugf("Overriding graph root %q with %q from database",
- c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
- }
- c.StorageConfig.GraphRoot = dbConfig.StorageRoot
- }
-
- if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
- if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
- c.StorageConfig.GraphDriverName != "" {
- logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
- c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
- }
- c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
- }
-
- if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
- if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
- logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
- }
- c.StaticDir = dbConfig.LibpodRoot
- }
-
- if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
- if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
- logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
- }
- c.TmpDir = dbConfig.LibpodTmp
- c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
- }
-
- if !c.VolumePathSet && dbConfig.VolumePath != "" {
- if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
- logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
- }
- c.VolumePath = dbConfig.VolumePath
- }
- return nil
-}
-
-func mergeStrings(a, b string) string {
- if a == "" {
- return b
- }
- return a
-}
-
-func mergeStringSlices(a, b []string) []string {
- if len(a) == 0 && b != nil {
- return b
- }
- return a
-}
-
-func mergeStringMaps(a, b map[string][]string) map[string][]string {
- if len(a) == 0 && b != nil {
- return b
- }
- return a
-}
-
-func mergeInt64s(a, b int64) int64 {
- if a == 0 {
- return b
- }
- return a
-}
-
-func mergeUint32s(a, b uint32) uint32 {
- if a == 0 {
- return b
- }
- return a
-}
-
-func mergeBools(a, b bool) bool {
- if !a {
- return b
- }
- return a
-}
diff --git a/libpod/config/merge_test.go b/libpod/config/merge_test.go
deleted file mode 100644
index eb450b273..000000000
--- a/libpod/config/merge_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package config
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestMergeStrings(t *testing.T) {
- testData := []struct {
- a string
- b string
- res string
- }{
- {"", "", ""},
- {"a", "", "a"},
- {"a", "b", "a"},
- {"", "b", "b"},
- }
- for _, data := range testData {
- res := mergeStrings(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeStringSlices(t *testing.T) {
- testData := []struct {
- a []string
- b []string
- res []string
- }{
- {
- nil, nil, nil,
- },
- {
- nil,
- []string{},
- []string{},
- },
- {
- []string{},
- nil,
- []string{},
- },
- {
- []string{},
- []string{},
- []string{},
- },
- {
- []string{"a"},
- []string{},
- []string{"a"},
- },
- {
- []string{"a"},
- []string{"b"},
- []string{"a"},
- },
- {
- []string{},
- []string{"b"},
- []string{"b"},
- },
- }
- for _, data := range testData {
- res := mergeStringSlices(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeStringMaps(t *testing.T) {
- testData := []struct {
- a map[string][]string
- b map[string][]string
- res map[string][]string
- }{
- {
- nil, nil, nil,
- },
- {
- nil,
- map[string][]string{},
- map[string][]string{}},
- {
- map[string][]string{"a": {"a"}},
- nil,
- map[string][]string{"a": {"a"}},
- },
- {
- nil,
- map[string][]string{"b": {"b"}},
- map[string][]string{"b": {"b"}},
- },
- {
- map[string][]string{"a": {"a"}},
- map[string][]string{"b": {"b"}},
- map[string][]string{"a": {"a"}},
- },
- }
- for _, data := range testData {
- res := mergeStringMaps(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeInts64(t *testing.T) {
- testData := []struct {
- a int64
- b int64
- res int64
- }{
- {int64(0), int64(0), int64(0)},
- {int64(1), int64(0), int64(1)},
- {int64(0), int64(1), int64(1)},
- {int64(2), int64(1), int64(2)},
- {int64(-1), int64(1), int64(-1)},
- {int64(0), int64(-1), int64(-1)},
- }
- for _, data := range testData {
- res := mergeInt64s(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-func TestMergeUint32(t *testing.T) {
- testData := []struct {
- a uint32
- b uint32
- res uint32
- }{
- {uint32(0), uint32(0), uint32(0)},
- {uint32(1), uint32(0), uint32(1)},
- {uint32(0), uint32(1), uint32(1)},
- {uint32(2), uint32(1), uint32(2)},
- }
- for _, data := range testData {
- res := mergeUint32s(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeBools(t *testing.T) {
- testData := []struct {
- a bool
- b bool
- res bool
- }{
- {false, false, false},
- {true, false, true},
- {false, true, true},
- {true, true, true},
- }
- for _, data := range testData {
- res := mergeBools(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
diff --git a/pkg/api/handlers/containers_create.go b/pkg/api/handlers/generic/containers_create.go
index 48f0de94d..7e542752f 100644
--- a/pkg/api/handlers/containers_create.go
+++ b/pkg/api/handlers/generic/containers_create.go
@@ -1,4 +1,4 @@
-package handlers
+package generic
import (
"encoding/json"
@@ -6,10 +6,10 @@ import (
"net/http"
"strings"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
image2 "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/signal"
@@ -17,14 +17,13 @@ import (
"github.com/containers/storage"
"github.com/gorilla/schema"
"github.com/pkg/errors"
- log "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func CreateContainer(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
- input := CreateContainerConfig{}
+ input := handlers.CreateContainerConfig{}
query := struct {
Name string `schema:"name"`
}{
@@ -52,34 +51,11 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "makeCreatConfig()"))
return
}
-
cc.Name = query.Name
- var pod *libpod.Pod
- ctr, err := shared.CreateContainerFromCreateConfig(runtime, &cc, r.Context(), pod)
- if err != nil {
- if strings.Contains(err.Error(), "invalid log driver") {
- // this does not quite work yet and needs a little more massaging
- w.Header().Set("Content-Type", "text/plain; charset=us-ascii")
- w.WriteHeader(http.StatusInternalServerError)
- msg := fmt.Sprintf("logger: no log driver named '%s' is registered", input.HostConfig.LogConfig.Type)
- if _, err := fmt.Fprintln(w, msg); err != nil {
- log.Errorf("%s: %q", msg, err)
- }
- //s.WriteResponse(w, http.StatusInternalServerError, fmt.Sprintf("logger: no log driver named '%s' is registered", input.HostConfig.LogConfig.Type))
- return
- }
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()"))
- return
- }
-
- response := ContainerCreateResponse{
- ID: ctr.ID(),
- Warnings: []string{}}
-
- utils.WriteResponse(w, http.StatusCreated, response)
+ utils.CreateContainer(r.Context(), w, runtime, &cc)
}
-func makeCreateConfig(input CreateContainerConfig, newImage *image2.Image) (createconfig.CreateConfig, error) {
+func makeCreateConfig(input handlers.CreateContainerConfig, newImage *image2.Image) (createconfig.CreateConfig, error) {
var (
err error
init bool
diff --git a/pkg/api/handlers/generic/images.go b/pkg/api/handlers/generic/images.go
index afd107207..1ced499d9 100644
--- a/pkg/api/handlers/generic/images.go
+++ b/pkg/api/handlers/generic/images.go
@@ -255,7 +255,7 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
tag – Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled.
*/
fromImage := query.FromImage
- if len(query.Tag) < 1 {
+ if len(query.Tag) >= 1 {
fromImage = fmt.Sprintf("%s:%s", fromImage, query.Tag)
}
diff --git a/pkg/api/handlers/generic/swagger.go b/pkg/api/handlers/generic/swagger.go
index bfe527c41..c9c9610bb 100644
--- a/pkg/api/handlers/generic/swagger.go
+++ b/pkg/api/handlers/generic/swagger.go
@@ -1,13 +1,15 @@
package generic
-import "github.com/containers/libpod/pkg/api/handlers"
+import (
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+)
// Create container
// swagger:response ContainerCreateResponse
type swagCtrCreateResponse struct {
// in:body
Body struct {
- handlers.ContainerCreateResponse
+ utils.ContainerCreateResponse
}
}
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
new file mode 100644
index 000000000..ebca41151
--- /dev/null
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -0,0 +1,29 @@
+package libpod
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/pkg/errors"
+)
+
+// CreateContainer takes a specgenerator and makes a container. It returns
+// the new container ID on success along with any warnings.
+func CreateContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ var sg specgen.SpecGenerator
+ if err := json.NewDecoder(r.Body).Decode(&sg); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ return
+ }
+ ctr, err := sg.MakeContainer(runtime)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ response := utils.ContainerCreateResponse{ID: ctr.ID()}
+ utils.WriteJSON(w, http.StatusCreated, response)
+}
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index d043b1204..008b9b14b 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -99,12 +99,10 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http_code, err)
return
}
- utils.WriteResponse(w, http.StatusCreated, handlers.IDResponse{ID: pod.CgroupParent()})
+ utils.WriteResponse(w, http.StatusCreated, handlers.IDResponse{ID: pod.ID()})
}
func Pods(w http.ResponseWriter, r *http.Request) {
- // 200 ok
- // 500 internal
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
podInspectData []*libpod.PodInspect
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index a50f183f7..6268028f5 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -151,6 +151,7 @@ type ContainerTopOKBody struct {
dockerContainer.ContainerTopOKBody
}
+// swagger:model PodCreateConfig
type PodCreateConfig struct {
Name string `json:"name"`
CGroupParent string `json:"cgroup-parent"`
@@ -548,11 +549,3 @@ func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) {
}
return ports, nil
}
-
-// ContainerCreateResponse is the response struct for creating a container
-type ContainerCreateResponse struct {
- // ID of the container created
- ID string `json:"id"`
- // Warnings during container creation
- Warnings []string `json:"Warnings"`
-}
diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go
index c9bb9cf09..402005581 100644
--- a/pkg/api/handlers/utils/containers.go
+++ b/pkg/api/handlers/utils/containers.go
@@ -1,6 +1,7 @@
package utils
import (
+ "context"
"fmt"
"net/http"
"syscall"
@@ -9,10 +10,19 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
+ createconfig "github.com/containers/libpod/pkg/spec"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
+// ContainerCreateResponse is the response struct for creating a container
+type ContainerCreateResponse struct {
+ // ID of the container created
+ ID string `json:"id"`
+ // Warnings during container creation
+ Warnings []string `json:"Warnings"`
+}
+
func KillContainer(w http.ResponseWriter, r *http.Request) (*libpod.Container, error) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
@@ -119,3 +129,18 @@ func GenerateFilterFuncsFromMap(r *libpod.Runtime, filters map[string][]string)
}
return filterFuncs, nil
}
+
+func CreateContainer(ctx context.Context, w http.ResponseWriter, runtime *libpod.Runtime, cc *createconfig.CreateConfig) {
+ var pod *libpod.Pod
+ ctr, err := shared.CreateContainerFromCreateConfig(runtime, cc, ctx, pod)
+ if err != nil {
+ Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()"))
+ return
+ }
+
+ response := ContainerCreateResponse{
+ ID: ctr.ID(),
+ Warnings: []string{}}
+
+ WriteResponse(w, http.StatusCreated, response)
+}
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index 06877863c..6007a2d00 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -33,7 +33,7 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/create"), s.APIHandler(handlers.CreateContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/create"), s.APIHandler(generic.CreateContainer)).Methods(http.MethodPost)
// swagger:operation GET /containers/json compat listContainers
// ---
// tags:
@@ -550,7 +550,31 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
libpod endpoints
*/
- r.HandleFunc(VersionedPath("/libpod/containers/create"), s.APIHandler(handlers.CreateContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /containers/create libpod libpodContainerCreate
+ // ---
+ // summary: Create a container
+ // tags:
+ // - containers
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: body
+ // name: create
+ // description: attributes for creating a container
+ // schema:
+ // $ref: "#/definitions/SpecGenerator"
+ // responses:
+ // 201:
+ // $ref: "#/responses/ContainerCreateResponse"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 409:
+ // $ref: "#/responses/ConflictError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/create"), s.APIHandler(libpod.CreateContainer)).Methods(http.MethodPost)
// swagger:operation GET /libpod/containers/json libpod libpodListContainers
// ---
// tags:
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 5b9ac7952..4c8f05385 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -822,7 +822,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// 500:
// $ref: '#/responses/InternalError'
r.Handle(VersionedPath("/libpod/images/search"), s.APIHandler(handlers.SearchImages)).Methods(http.MethodGet)
- // swagger:operation DELETE /libpod/images/{name} libpod libpodRemoveImage
+ // swagger:operation DELETE /libpod/images/{name:.*} libpod libpodRemoveImage
// ---
// tags:
// - images
@@ -830,7 +830,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Delete an image from local store
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: name or ID of image to delete
diff --git a/pkg/api/server/register_pods.go b/pkg/api/server/register_pods.go
index 1c7486711..af2330665 100644
--- a/pkg/api/server/register_pods.go
+++ b/pkg/api/server/register_pods.go
@@ -26,6 +26,25 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// 500:
// $ref: "#/responses/InternalError"
r.Handle(VersionedPath("/libpod/pods/json"), s.APIHandler(libpod.Pods)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/pods/create pods CreatePod
+ // ---
+ // summary: Create a pod
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: body
+ // name: create
+ // description: attributes for creating a pod
+ // schema:
+ // type: object
+ // $ref: "#/definitions/PodCreateConfig"
+ // responses:
+ // 200:
+ // $ref: "#/definitions/IdResponse"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 500:
+ // $ref: "#/responses/InternalError"
r.Handle(VersionedPath("/libpod/pods/create"), s.APIHandler(libpod.PodCreate)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/prune pods PrunePods
// ---
diff --git a/pkg/bindings/containers/create.go b/pkg/bindings/containers/create.go
new file mode 100644
index 000000000..18b32335b
--- /dev/null
+++ b/pkg/bindings/containers/create.go
@@ -0,0 +1,30 @@
+package containers
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/specgen"
+ jsoniter "github.com/json-iterator/go"
+)
+
+func CreateWithSpec(ctx context.Context, s specgen.SpecGenerator) (utils.ContainerCreateResponse, error) {
+ var ccr utils.ContainerCreateResponse
+ conn, err := bindings.GetConnectionFromContext(ctx)
+ if err != nil {
+ return ccr, err
+ }
+ specgenString, err := jsoniter.MarshalToString(s)
+ if err != nil {
+ return ccr, err
+ }
+ stringReader := strings.NewReader(specgenString)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/containers/create", nil)
+ if err != nil {
+ return ccr, err
+ }
+ return ccr, response.Process(&ccr)
+}
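
A hedged usage sketch for the new binding follows. The Name and Image fields are the ones referenced by pkg/specgen/create.go in this diff; how the connection context is produced, and any other SpecGenerator fields, are assumptions for illustration rather than taken from this change.

    package demo

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/bindings/containers"
        "github.com/containers/libpod/pkg/specgen"
    )

    // createDemo assumes connCtx already carries a connection to the podman
    // service (see pkg/bindings for how such a context is obtained).
    func createDemo(connCtx context.Context) error {
        s := specgen.SpecGenerator{
            Name:  "demo",   // optional container name
            Image: "fedora", // image the container is created from
        }
        ccr, err := containers.CreateWithSpec(connCtx, s)
        if err != nil {
            return err
        }
        fmt.Println("created container", ccr.ID)
        return nil
    }
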
diff --git a/pkg/seccomp/seccomp.go b/pkg/seccomp/seccomp.go
index dcf255378..eeba46a72 100644
--- a/pkg/seccomp/seccomp.go
+++ b/pkg/seccomp/seccomp.go
@@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
)
-// ContianerImageLabel is the key of the image annotation embedding a seccomp
+// ContainerImageLabel is the key of the image annotation embedding a seccomp
// profile.
const ContainerImageLabel = "io.containers.seccomp.profile"
diff --git a/pkg/spec/config_linux.go b/pkg/spec/config_linux.go
index 5f39b6d0d..544c0020d 100644
--- a/pkg/spec/config_linux.go
+++ b/pkg/spec/config_linux.go
@@ -32,8 +32,8 @@ func Device(d *configs.Device) spec.LinuxDevice {
}
}
-// devicesFromPath computes a list of devices
-func devicesFromPath(g *generate.Generator, devicePath string) error {
+// DevicesFromPath computes a list of devices
+func DevicesFromPath(g *generate.Generator, devicePath string) error {
devs := strings.Split(devicePath, ":")
resolvedDevicePath := devs[0]
// check if it is a symbolic link
@@ -216,7 +216,7 @@ func getDevices(path string) ([]*configs.Device, error) {
return out, nil
}
-func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
+func addPrivilegedDevices(g *generate.Generator) error {
hostDevices, err := getDevices("/dev")
if err != nil {
return err
@@ -280,16 +280,16 @@ func (c *CreateConfig) createBlockIO() (*spec.LinuxBlockIO, error) {
var lwds []spec.LinuxWeightDevice
ret = bio
for _, i := range c.Resources.BlkioWeightDevice {
- wd, err := validateweightDevice(i)
+ wd, err := ValidateweightDevice(i)
if err != nil {
return ret, errors.Wrapf(err, "invalid values for blkio-weight-device")
}
- wdStat, err := getStatFromPath(wd.path)
+ wdStat, err := GetStatFromPath(wd.Path)
if err != nil {
- return ret, errors.Wrapf(err, "error getting stat from path %q", wd.path)
+ return ret, errors.Wrapf(err, "error getting stat from path %q", wd.Path)
}
lwd := spec.LinuxWeightDevice{
- Weight: &wd.weight,
+ Weight: &wd.Weight,
}
lwd.Major = int64(unix.Major(wdStat.Rdev))
lwd.Minor = int64(unix.Minor(wdStat.Rdev))
@@ -347,7 +347,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
if err != nil {
return []spec.LinuxThrottleDevice{}, err
}
- ltdStat, err := getStatFromPath(t.path)
+ ltdStat, err := GetStatFromPath(t.path)
if err != nil {
return ltds, errors.Wrapf(err, "error getting stat from path %q", t.path)
}
@@ -361,7 +361,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
return ltds, nil
}
-func getStatFromPath(path string) (unix.Stat_t, error) {
+func GetStatFromPath(path string) (unix.Stat_t, error) {
s := unix.Stat_t{}
err := unix.Stat(path, &s)
return s, err
diff --git a/pkg/spec/config_unsupported.go b/pkg/spec/config_unsupported.go
index be3e7046d..568afde55 100644
--- a/pkg/spec/config_unsupported.go
+++ b/pkg/spec/config_unsupported.go
@@ -15,7 +15,7 @@ func addDevice(g *generate.Generator, device string) error {
return errors.New("function not implemented")
}
-func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
+func addPrivilegedDevices(g *generate.Generator) error {
return errors.New("function not implemented")
}
@@ -27,7 +27,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
return nil, errors.New("function not implemented")
}
-func devicesFromPath(g *generate.Generator, devicePath string) error {
+func DevicesFromPath(g *generate.Generator, devicePath string) error {
return errors.New("function not implemented")
}
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 8010be0d4..5011df496 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -126,6 +126,7 @@ type SecurityConfig struct {
}
// CreateConfig is a pre OCI spec structure. It represents user input from varlink or the CLI
+// swagger:model CreateConfig
type CreateConfig struct {
Annotations map[string]string
Args []string
@@ -386,6 +387,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
// AddPrivilegedDevices iterates through host devices and adds all
// host devices to the spec
-func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error {
- return c.addPrivilegedDevices(g)
+func AddPrivilegedDevices(g *generate.Generator) error {
+ return addPrivilegedDevices(g)
}
diff --git a/pkg/spec/parse.go b/pkg/spec/parse.go
index a5dfccdb9..38d93b87f 100644
--- a/pkg/spec/parse.go
+++ b/pkg/spec/parse.go
@@ -19,12 +19,12 @@ const Pod = "pod"
// weightDevice is a structure that holds device:weight pair
type weightDevice struct {
- path string
- weight uint16
+ Path string
+ Weight uint16
}
func (w *weightDevice) String() string {
- return fmt.Sprintf("%s:%d", w.path, w.weight)
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// LinuxNS is a struct that contains namespace information
@@ -59,9 +59,9 @@ func NS(s string) string {
return ""
}
-// validateweightDevice validates that the specified string has a valid device-weight format
+// ValidateweightDevice validates that the specified string has a valid device-weight format
// for blkio-weight-device flag
-func validateweightDevice(val string) (*weightDevice, error) {
+func ValidateweightDevice(val string) (*weightDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
@@ -78,8 +78,8 @@ func validateweightDevice(val string) (*weightDevice, error) {
}
return &weightDevice{
- path: split[0],
- weight: uint16(weight),
+ Path: split[0],
+ Weight: uint16(weight),
}, nil
}
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index b2a152a2d..a4ae22efd 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -16,9 +16,9 @@ import (
"github.com/pkg/errors"
)
-const cpuPeriod = 100000
+const CpuPeriod = 100000
-func getAvailableGids() (int64, error) {
+func GetAvailableGids() (int64, error) {
idMap, err := user.ParseIDMapFile("/proc/self/gid_map")
if err != nil {
return 0, err
@@ -80,7 +80,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
gid5Available := true
if isRootless {
- nGids, err := getAvailableGids()
+ nGids, err := GetAvailableGids()
if err != nil {
return nil, err
}
@@ -197,8 +197,8 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
addedResources = true
}
if config.Resources.CPUs != 0 {
- g.SetLinuxResourcesCPUPeriod(cpuPeriod)
- g.SetLinuxResourcesCPUQuota(int64(config.Resources.CPUs * cpuPeriod))
+ g.SetLinuxResourcesCPUPeriod(CpuPeriod)
+ g.SetLinuxResourcesCPUQuota(int64(config.Resources.CPUs * CpuPeriod))
addedResources = true
}
if config.Resources.CPURtRuntime != 0 {
@@ -223,12 +223,12 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
// If privileged, we need to add all the host devices to the
// spec. We do not add the user provided ones because we are
// already adding them all.
- if err := config.AddPrivilegedDevices(&g); err != nil {
+ if err := AddPrivilegedDevices(&g); err != nil {
return nil, err
}
} else {
for _, devicePath := range config.Devices {
- if err := devicesFromPath(&g, devicePath); err != nil {
+ if err := DevicesFromPath(&g, devicePath); err != nil {
return nil, err
}
}
@@ -241,23 +241,35 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// SECURITY OPTS
+ var runtimeConfig *libpodconfig.Config
+
+ if runtime != nil {
+ runtimeConfig, err = runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ }
+
g.SetProcessNoNewPrivileges(config.Security.NoNewPrivs)
if !config.Security.Privileged {
g.SetProcessApparmorProfile(config.Security.ApparmorProfile)
}
- blockAccessToKernelFilesystems(config, &g)
-
- var runtimeConfig *libpodconfig.Config
-
- if runtime != nil {
- runtimeConfig, err = runtime.GetConfig()
- if err != nil {
+ // Unless already set via the CLI, check if we need to disable process
+ // labels or set the defaults.
+ if len(config.Security.LabelOpts) == 0 && runtimeConfig != nil {
+ if !runtimeConfig.EnableLabeling {
+ // Disabled in the config.
+ config.Security.LabelOpts = append(config.Security.LabelOpts, "disable")
+ } else if err := config.Security.SetLabelOpts(runtime, &config.Pid, &config.Ipc); err != nil {
+ // Defaults!
return nil, err
}
}
+ BlockAccessToKernelFilesystems(config.Security.Privileged, config.Pid.PidMode.IsHost(), &g)
+
// RESOURCES - PIDS
if config.Resources.PidsLimit > 0 {
// if running on rootless on a cgroupv1 machine or using the cgroupfs manager, pids
@@ -320,9 +332,9 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// BIND MOUNTS
- configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts)
+ configSpec.Mounts = SupercedeUserMounts(userMounts, configSpec.Mounts)
// Process mounts to ensure correct options
- finalMounts, err := initFSMounts(configSpec.Mounts)
+ finalMounts, err := InitFSMounts(configSpec.Mounts)
if err != nil {
return nil, err
}
@@ -404,8 +416,8 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
return configSpec, nil
}
-func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
- if !config.Security.Privileged {
+func BlockAccessToKernelFilesystems(privileged, pidModeIsHost bool, g *generate.Generator) {
+ if !privileged {
for _, mp := range []string{
"/proc/acpi",
"/proc/kcore",
@@ -421,7 +433,7 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
g.AddLinuxMaskedPaths(mp)
}
- if config.Pid.PidMode.IsHost() && rootless.IsRootless() {
+ if pidModeIsHost && rootless.IsRootless() {
return
}
diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go
index 0e2098c1d..e37fa2451 100644
--- a/pkg/spec/storage.go
+++ b/pkg/spec/storage.go
@@ -825,7 +825,7 @@ func (config *CreateConfig) addContainerInitBinary(path string) (spec.Mount, err
// TODO: Should we unmount subtree mounts? E.g., if /tmp/ is mounted by
// one mount, and we already have /tmp/a and /tmp/b, should we remove
// the /tmp/a and /tmp/b mounts in favor of the more general /tmp?
-func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
+func SupercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
if len(mounts) > 0 {
// If we have overlappings mounts, remove them from the spec in favor of
// the user-added volume mounts
@@ -854,7 +854,7 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
}
// Ensure mount options on all mounts are correct
-func initFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
+func InitFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
// We need to look up mounts so we can figure out the proper mount flags
// to apply.
systemMounts, err := pmount.GetMounts()
diff --git a/pkg/specgen/config_linux_cgo.go b/pkg/specgen/config_linux_cgo.go
new file mode 100644
index 000000000..6f547a40d
--- /dev/null
+++ b/pkg/specgen/config_linux_cgo.go
@@ -0,0 +1,62 @@
+// +build linux,cgo
+
+package specgen
+
+import (
+ "context"
+ "io/ioutil"
+
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/seccomp"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ goSeccomp "github.com/seccomp/containers-golang"
+ "github.com/sirupsen/logrus"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
+ var seccompConfig *spec.LinuxSeccomp
+ var err error
+
+ scp, err := seccomp.LookupPolicy(s.SeccompPolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ if scp == seccomp.PolicyImage {
+ labels, err := img.Labels(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ imagePolicy := labels[seccomp.ContainerImageLabel]
+ if len(imagePolicy) < 1 {
+ return nil, errors.New("no seccomp policy defined by image")
+ }
+ logrus.Debug("Loading seccomp profile from the security config")
+ seccompConfig, err = goSeccomp.LoadProfile(imagePolicy, configSpec)
+ if err != nil {
+ return nil, errors.Wrap(err, "loading seccomp profile failed")
+ }
+ return seccompConfig, nil
+ }
+
+ if s.SeccompProfilePath != "" {
+ logrus.Debugf("Loading seccomp profile from %q", s.SeccompProfilePath)
+ seccompProfile, err := ioutil.ReadFile(s.SeccompProfilePath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "opening seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ seccompConfig, err = goSeccomp.LoadProfile(string(seccompProfile), configSpec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ } else {
+ logrus.Debug("Loading default seccomp profile")
+ seccompConfig, err = goSeccomp.GetDefaultProfile(configSpec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ }
+
+ return seccompConfig, nil
+}
diff --git a/pkg/specgen/config_linux_nocgo.go b/pkg/specgen/config_linux_nocgo.go
new file mode 100644
index 000000000..fc0c58c37
--- /dev/null
+++ b/pkg/specgen/config_linux_nocgo.go
@@ -0,0 +1,11 @@
+// +build linux,!cgo
+
+package specgen
+
+import (
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+ return nil, nil
+}
diff --git a/pkg/specgen/config_unsupported.go b/pkg/specgen/config_unsupported.go
new file mode 100644
index 000000000..5d24ac39c
--- /dev/null
+++ b/pkg/specgen/config_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package specgen
+
+import (
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+ return nil, errors.New("function not supported on non-linux OS's")
+}
diff --git a/pkg/specgen/create.go b/pkg/specgen/create.go
new file mode 100644
index 000000000..c8fee5f05
--- /dev/null
+++ b/pkg/specgen/create.go
@@ -0,0 +1,187 @@
+package specgen
+
+import (
+ "context"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/config"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "os"
+)
+
+// MakeContainer creates a container based on the SpecGenerator
+func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, error) {
+ var pod *libpod.Pod
+ if err := s.validate(rt); err != nil {
+ return nil, errors.Wrap(err, "invalid config provided")
+ }
+ rtc, err := rt.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ options, err := s.createContainerOptions(rt, pod)
+ if err != nil {
+ return nil, err
+ }
+
+ podmanPath, err := os.Executable()
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, s.createExitCommandOption(rtc, podmanPath))
+ newImage, err := rt.ImageRuntime().NewFromLocal(s.Image)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO mheon wants to talk with Dan about this
+ useImageVolumes := s.ImageVolumeMode == "bind"
+ options = append(options, libpod.WithRootFSFromImage(newImage.ID(), s.Image, useImageVolumes))
+
+ runtimeSpec, err := s.toOCISpec(rt, newImage)
+ if err != nil {
+ return nil, err
+ }
+ return rt.NewContainer(context.Background(), runtimeSpec, options...)
+}
+
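+// createContainerOptions translates the SpecGenerator into the set of libpod
+// container create options applied when the container is created.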
+func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) {
+ var options []libpod.CtrCreateOption
+ var err error
+
+ if s.Stdin {
+ options = append(options, libpod.WithStdin())
+ }
+ if len(s.Systemd) > 0 {
+ options = append(options, libpod.WithSystemd())
+ }
+ if len(s.Name) > 0 {
+ logrus.Debugf("setting container name %s", s.Name)
+ options = append(options, libpod.WithName(s.Name))
+ }
+ if s.Pod != "" {
+ logrus.Debugf("adding container to pod %s", s.Pod)
+ options = append(options, rt.WithPod(pod))
+ }
+ destinations := []string{}
+ // Take all mount and named volume destinations.
+ for _, mount := range s.Mounts {
+ destinations = append(destinations, mount.Destination)
+ }
+ for _, volume := range s.Volumes {
+ destinations = append(destinations, volume.Dest)
+ }
+ options = append(options, libpod.WithUserVolumes(destinations))
+
+ if len(s.Volumes) != 0 {
+ options = append(options, libpod.WithNamedVolumes(s.Volumes))
+ }
+
+ if len(s.Command) != 0 {
+ options = append(options, libpod.WithCommand(s.Command))
+ }
+
+ options = append(options, libpod.WithEntrypoint(s.Entrypoint))
+ if s.StopSignal != nil {
+ options = append(options, libpod.WithStopSignal(*s.StopSignal))
+ }
+ if s.StopTimeout != nil {
+ options = append(options, libpod.WithStopTimeout(*s.StopTimeout))
+ }
+ if s.LogConfiguration != nil {
+ if len(s.LogConfiguration.Path) > 0 {
+ options = append(options, libpod.WithLogPath(s.LogConfiguration.Path))
+ }
+ if len(s.LogConfiguration.Options) > 0 && s.LogConfiguration.Options["tag"] != "" {
+ // Note: I'm really guessing here.
+ options = append(options, libpod.WithLogTag(s.LogConfiguration.Options["tag"]))
+ }
+
+ if len(s.LogConfiguration.Driver) > 0 {
+ options = append(options, libpod.WithLogDriver(s.LogConfiguration.Driver))
+ }
+ }
+
+ // Security options
+ if len(s.SelinuxOpts) > 0 {
+ options = append(options, libpod.WithSecLabels(s.SelinuxOpts))
+ }
+ options = append(options, libpod.WithPrivileged(s.Privileged))
+
+ // Get namespace related options
+ namespaceOptions, err := s.generateNamespaceContainerOpts(rt)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, namespaceOptions...)
+
+ // TODO NetworkNS still needs to be done!
+ if len(s.ConmonPidFile) > 0 {
+ options = append(options, libpod.WithConmonPidFile(s.ConmonPidFile))
+ }
+ options = append(options, libpod.WithLabels(s.Labels))
+ if s.ShmSize != nil {
+ options = append(options, libpod.WithShmSize(*s.ShmSize))
+ }
+ if s.Rootfs != "" {
+ options = append(options, libpod.WithRootFS(s.Rootfs))
+ }
+ // Default used if not overridden on command line
+
+ if s.RestartPolicy != "" {
+ if s.RestartPolicy == "unless-stopped" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "the unless-stopped restart policy is not supported")
+ }
+ if s.RestartRetries != nil {
+ options = append(options, libpod.WithRestartRetries(*s.RestartRetries))
+ }
+ options = append(options, libpod.WithRestartPolicy(s.RestartPolicy))
+ }
+
+ if s.ContainerHealthCheckConfig.HealthConfig != nil {
+ options = append(options, libpod.WithHealthCheck(s.ContainerHealthCheckConfig.HealthConfig))
+ logrus.Debugf("New container has a health check")
+ }
+ return options, nil
+}
+
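+// createExitCommandOption builds the cleanup command (podman container cleanup)
+// that is run once the container exits.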
+func (s *SpecGenerator) createExitCommandOption(config *config.Config, podmanPath string) libpod.CtrCreateOption {
+ // We need a cleanup process for containers in the current model.
+ // But we can't assume that the caller is Podman - it could be another
+ // user of the API.
+ // As such, provide a way to specify a path to Podman, so we can
+ // still invoke a cleanup process.
+
+ command := []string{podmanPath,
+ "--root", config.StorageConfig.GraphRoot,
+ "--runroot", config.StorageConfig.RunRoot,
+ "--log-level", logrus.GetLevel().String(),
+ "--cgroup-manager", config.CgroupManager,
+ "--tmpdir", config.TmpDir,
+ }
+ if config.OCIRuntime != "" {
+ command = append(command, []string{"--runtime", config.OCIRuntime}...)
+ }
+ if config.StorageConfig.GraphDriverName != "" {
+ command = append(command, []string{"--storage-driver", config.StorageConfig.GraphDriverName}...)
+ }
+ for _, opt := range config.StorageConfig.GraphDriverOptions {
+ command = append(command, []string{"--storage-opt", opt}...)
+ }
+ if config.EventsLogger != "" {
+ command = append(command, []string{"--events-backend", config.EventsLogger}...)
+ }
+
+ // TODO Mheon wants to leave this for now
+ //if s.sys {
+ // command = append(command, "--syslog", "true")
+ //}
+ command = append(command, []string{"container", "cleanup"}...)
+
+ if s.Remove {
+ command = append(command, "--rm")
+ }
+ return libpod.WithExitCommand(command)
+}
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
new file mode 100644
index 000000000..025cb31e0
--- /dev/null
+++ b/pkg/specgen/namespaces.go
@@ -0,0 +1,467 @@
+package specgen
+
+import (
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/capabilities"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
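+// NamespaceMode describes how a container namespace is created or joined.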
+type NamespaceMode string
+
+const (
+ // Host means the namespace is derived from
+ // the host
+ Host NamespaceMode = "host"
+ // Path is the path to a namespace
+ Path NamespaceMode = "path"
+ // FromContainer means namespace is derived from a
+ // different container
+ FromContainer NamespaceMode = "container"
+ // FromPod indicates the namespace is derived from a pod
+ FromPod NamespaceMode = "pod"
+ // Private indicates the namespace is private
+ Private NamespaceMode = "private"
+ // NoNetwork indicates no network namespace should
+ // be joined. Loopback should still exist
+ NoNetwork NamespaceMode = "none"
+ // Bridge indicates that a CNI network stack
+ // should be used
+ Bridge NamespaceMode = "bridge"
+ // Slirp indicates that a slirp4ns network stack should
+ // be used
+ Slirp NamespaceMode = "slirp4ns"
+)
+
+// Namespace describes the namespace
+type Namespace struct {
+ NSMode NamespaceMode `json:"nsmode,omitempty"`
+ Value string `json:"string,omitempty"`
+}
+
+// IsHost returns true if the namespace is host based
+func (n *Namespace) IsHost() bool {
+ return n.NSMode == Host
+}
+
+// IsPath returns true if the namespace is based on a path
+func (n *Namespace) IsPath() bool {
+ return n.NSMode == Path
+}
+
+// IsContainer returns true if the namespace is based on a container
+func (n *Namespace) IsContainer() bool {
+ return n.NSMode == FromContainer
+}
+
+// IsPod returns true if the namespace is based on a pod
+func (n *Namespace) IsPod() bool {
+ return n.NSMode == FromPod
+}
+
+// IsPrivate indicates the namespace is private
+func (n *Namespace) IsPrivate() bool {
+ return n.NSMode == Private
+}
+
+// validate performs simple validation on the namespace to make sure it is not
+// invalid from the get-go
+func (n *Namespace) validate() error {
+ if n == nil {
+ return nil
+ }
+ switch n.NSMode {
+ case Host, Path, FromContainer, FromPod, Private, NoNetwork, Bridge, Slirp:
+ break
+ default:
+ return errors.Errorf("invalid network %q", n.NSMode)
+ }
+ // Path and From Container MUST have a string value set
+ if n.NSMode == Path || n.NSMode == FromContainer {
+ if len(n.Value) < 1 {
+ return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+ }
+ } else {
+ // All others must NOT set a string value
+ if len(n.Value) > 0 {
+ return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+ }
+ }
+ return nil
+}
+
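+// generateNamespaceContainerOpts converts the namespace settings in the
+// SpecGenerator into libpod container create options.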
+func (s *SpecGenerator) generateNamespaceContainerOpts(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ var portBindings []ocicni.PortMapping
+ options := make([]libpod.CtrCreateOption, 0)
+
+ // Cgroups
+ switch {
+ case s.CgroupNS.IsPrivate():
+ ns := s.CgroupNS.Value
+ if _, err := os.Stat(ns); err != nil {
+ return nil, err
+ }
+ case s.CgroupNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.CgroupNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.CgroupNS.Value)
+ }
+ options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ // TODO
+ //default:
+ // return nil, errors.New("cgroup name only supports private and container")
+ }
+
+ if s.CgroupParent != "" {
+ options = append(options, libpod.WithCgroupParent(s.CgroupParent))
+ }
+
+ if s.CgroupsMode != "" {
+ options = append(options, libpod.WithCgroupsMode(s.CgroupsMode))
+ }
+
+ // ipc
+ switch {
+ case s.IpcNS.IsHost():
+ options = append(options, libpod.WithShmDir("/dev/shm"))
+ case s.IpcNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.IpcNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.IpcNS.Value)
+ }
+ options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ options = append(options, libpod.WithShmDir(connectedCtr.ShmDir()))
+ }
+
+ // pid
+ if s.PidNS.IsContainer() {
+ connectedCtr, err := rt.LookupContainer(s.PidNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.PidNS.Value)
+ }
+ options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ }
+
+ // uts
+ switch {
+ case s.UtsNS.IsPod():
+ connectedPod, err := rt.LookupPod(s.UtsNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "pod %q not found", s.UtsNS.Value)
+ }
+ options = append(options, libpod.WithUTSNSFromPod(connectedPod))
+ case s.UtsNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.UtsNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.UtsNS.Value)
+ }
+
+ options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ }
+
+ if s.UseImageHosts {
+ options = append(options, libpod.WithUseImageHosts())
+ } else if len(s.HostAdd) > 0 {
+ options = append(options, libpod.WithHosts(s.HostAdd))
+ }
+
+ // User
+
+ switch {
+ case s.UserNS.IsPath():
+ ns := s.UserNS.Value
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined user namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ if s.IDMappings != nil {
+ options = append(options, libpod.WithIDMappings(*s.IDMappings))
+ }
+ case s.UserNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.UserNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.UserNS.Value)
+ }
+ options = append(options, libpod.WithUserNSFrom(connectedCtr))
+ default:
+ if s.IDMappings != nil {
+ options = append(options, libpod.WithIDMappings(*s.IDMappings))
+ }
+ }
+
+ options = append(options, libpod.WithUser(s.User))
+ options = append(options, libpod.WithGroups(s.Groups))
+
+ if len(s.PortMappings) > 0 {
+ portBindings = s.PortMappings
+ }
+
+ switch {
+ case s.NetNS.IsPath():
+ ns := s.NetNS.Value
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined network namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ case s.NetNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.NetNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.NetNS.Value)
+ }
+ options = append(options, libpod.WithNetNSFrom(connectedCtr))
+ case !s.NetNS.IsHost() && s.NetNS.NSMode != NoNetwork:
+ postConfigureNetNS := !s.UserNS.IsHost()
+ options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(s.NetNS.NSMode), s.CNINetworks))
+ }
+
+ if len(s.DNSSearch) > 0 {
+ options = append(options, libpod.WithDNSSearch(s.DNSSearch))
+ }
+ if len(s.DNSServer) > 0 {
+ // TODO I'm not sure how we are going to handle this given the input
+ if len(s.DNSServer) == 1 { //&& strings.ToLower(s.DNSServer[0].) == "none" {
+ options = append(options, libpod.WithUseImageResolvConf())
+ } else {
+ var dnsServers []string
+ for _, d := range s.DNSServer {
+ dnsServers = append(dnsServers, d.String())
+ }
+ options = append(options, libpod.WithDNS(dnsServers))
+ }
+ }
+ if len(s.DNSOption) > 0 {
+ options = append(options, libpod.WithDNSOption(s.DNSOption))
+ }
+ if s.StaticIP != nil {
+ options = append(options, libpod.WithStaticIP(*s.StaticIP))
+ }
+
+ if s.StaticMAC != nil {
+ options = append(options, libpod.WithStaticMAC(*s.StaticMAC))
+ }
+ return options, nil
+}
+
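+// pidConfigureGenerator configures the PID namespace in the OCI spec generator.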
+func (s *SpecGenerator) pidConfigureGenerator(g *generate.Generator) error {
+ if s.PidNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), s.PidNS.Value)
+ }
+ if s.PidNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
+ }
+ if s.PidNS.IsContainer() {
+ logrus.Debugf("using container %s pidmode", s.PidNS.Value)
+ }
+ if s.PidNS.IsPod() {
+ logrus.Debug("using pod pidmode")
+ }
+ return nil
+}
+
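+// utsConfigureGenerator configures the UTS namespace and the container
+// hostname in the OCI spec generator.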
+func (s *SpecGenerator) utsConfigureGenerator(g *generate.Generator, runtime *libpod.Runtime) error {
+ hostname := s.Hostname
+ var err error
+ if hostname == "" {
+ switch {
+ case s.UtsNS.IsContainer():
+ utsCtr, err := runtime.GetContainer(s.UtsNS.Value)
+ if err != nil {
+ return errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", s.UtsNS.Value)
+ }
+ hostname = utsCtr.Hostname()
+ case s.NetNS.IsHost() || s.UtsNS.IsHost():
+ hostname, err = os.Hostname()
+ if err != nil {
+ return errors.Wrap(err, "unable to retrieve hostname of the host")
+ }
+ default:
+ logrus.Debug("No hostname set; container's hostname will default to runtime default")
+ }
+ }
+ g.RemoveHostname()
+ if s.Hostname != "" || !s.UtsNS.IsHost() {
+ // Set the hostname in the OCI configuration only
+ // if specified by the user or if we are creating
+ // a new UTS namespace.
+ g.SetHostname(hostname)
+ }
+ g.AddProcessEnv("HOSTNAME", hostname)
+
+ if s.UtsNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), s.UtsNS.Value)
+ }
+ if s.UtsNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
+ }
+ if s.UtsNS.IsContainer() {
+ logrus.Debugf("using container %s utsmode", s.UtsNS.Value)
+ }
+ return nil
+}
+
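+// ipcConfigureGenerator configures the IPC namespace in the OCI spec generator.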
+func (s *SpecGenerator) ipcConfigureGenerator(g *generate.Generator) error {
+ if s.IpcNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), s.IpcNS.Value)
+ }
+ if s.IpcNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.IPCNamespace))
+ }
+ if s.IpcNS.IsContainer() {
+ logrus.Debugf("Using container %s ipcmode", s.IpcNS.Value)
+ }
+ return nil
+}
+
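+// cgroupConfigureGenerator configures the cgroup namespace in the OCI spec generator.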
+func (s *SpecGenerator) cgroupConfigureGenerator(g *generate.Generator) error {
+ if s.CgroupNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), s.CgroupNS.Value)
+ }
+ if s.CgroupNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.CgroupNamespace))
+ }
+ if s.CgroupNS.IsPrivate() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
+ }
+ if s.CgroupNS.IsContainer() {
+ logrus.Debugf("Using container %s cgroup mode", s.CgroupNS.Value)
+ }
+ return nil
+}
+
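+// networkConfigureGenerator configures the network namespace and the
+// publish-all-ports annotation in the OCI spec generator.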
+func (s *SpecGenerator) networkConfigureGenerator(g *generate.Generator) error {
+ switch {
+ case s.NetNS.IsHost():
+ logrus.Debug("Using host netmode")
+ if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
+ return err
+ }
+
+ case s.NetNS.NSMode == NoNetwork:
+ logrus.Debug("Using none netmode")
+ case s.NetNS.NSMode == Bridge:
+ logrus.Debug("Using bridge netmode")
+ case s.NetNS.IsContainer():
+ logrus.Debugf("using container %s netmode", s.NetNS.Value)
+ case s.NetNS.IsPath():
+ logrus.Debug("Using ns netmode")
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), s.NetNS.Value); err != nil {
+ return err
+ }
+ case s.NetNS.IsPod():
+ logrus.Debug("Using pod netmode, unless pod is not sharing")
+ case s.NetNS.NSMode == Slirp:
+ logrus.Debug("Using slirp4netns netmode")
+ default:
+ return errors.Errorf("unknown network mode")
+ }
+
+ if g.Config.Annotations == nil {
+ g.Config.Annotations = make(map[string]string)
+ }
+
+ if s.PublishImagePorts {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
+ } else {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
+ }
+
+ return nil
+}
+
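+// userConfigureGenerator configures the user namespace and UID/GID mappings
+// in the OCI spec generator.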
+func (s *SpecGenerator) userConfigureGenerator(g *generate.Generator) error {
+ if s.UserNS.IsPath() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), s.UserNS.Value); err != nil {
+ return err
+ }
+ // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
+ g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
+ g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
+ }
+
+ if s.IDMappings != nil {
+ if (len(s.IDMappings.UIDMap) > 0 || len(s.IDMappings.GIDMap) > 0) && !s.UserNS.IsHost() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
+ return err
+ }
+ }
+ for _, uidmap := range s.IDMappings.UIDMap {
+ g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
+ }
+ for _, gidmap := range s.IDMappings.GIDMap {
+ g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
+ }
+ }
+ return nil
+}
+
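+// securityConfigureGenerator applies privileged mode, capabilities, seccomp,
+// read-only rootfs, and sysctls to the OCI spec.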
+func (s *SpecGenerator) securityConfigureGenerator(g *generate.Generator, newImage *image.Image) error {
+ // HANDLE CAPABILITIES
+ // NOTE: Must happen before SECCOMP
+ if s.Privileged {
+ g.SetupPrivileged(true)
+ }
+
+ useNotRoot := func(user string) bool {
+ if user == "" || user == "root" || user == "0" {
+ return false
+ }
+ return true
+ }
+ configSpec := g.Config
+ var err error
+ var caplist []string
+ bounding := configSpec.Process.Capabilities.Bounding
+ if useNotRoot(s.User) {
+ configSpec.Process.Capabilities.Bounding = caplist
+ }
+ caplist, err = capabilities.MergeCapabilities(configSpec.Process.Capabilities.Bounding, s.CapAdd, s.CapDrop)
+ if err != nil {
+ return err
+ }
+
+ configSpec.Process.Capabilities.Bounding = caplist
+ configSpec.Process.Capabilities.Permitted = caplist
+ configSpec.Process.Capabilities.Inheritable = caplist
+ configSpec.Process.Capabilities.Effective = caplist
+ configSpec.Process.Capabilities.Ambient = caplist
+ if useNotRoot(s.User) {
+ caplist, err = capabilities.MergeCapabilities(bounding, s.CapAdd, s.CapDrop)
+ if err != nil {
+ return err
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = caplist
+
+ // HANDLE SECCOMP
+ if s.SeccompProfilePath != "unconfined" {
+ seccompConfig, err := s.getSeccompConfig(configSpec, newImage)
+ if err != nil {
+ return err
+ }
+ configSpec.Linux.Seccomp = seccompConfig
+ }
+
+ // Clear default Seccomp profile from Generator for privileged containers
+ if s.SeccompProfilePath == "unconfined" || s.Privileged {
+ configSpec.Linux.Seccomp = nil
+ }
+
+ g.SetRootReadonly(s.ReadOnlyFilesystem)
+ for sysctlKey, sysctlVal := range s.Sysctl {
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
+ return nil
+}
diff --git a/pkg/specgen/oci.go b/pkg/specgen/oci.go
new file mode 100644
index 000000000..2523f21b3
--- /dev/null
+++ b/pkg/specgen/oci.go
@@ -0,0 +1,260 @@
+package specgen
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/rootless"
+ createconfig "github.com/containers/libpod/pkg/spec"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+)
+
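+// toOCISpec builds the OCI runtime spec for the new container from the
+// SpecGenerator, the libpod runtime, and the resolved image.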
+func (s *SpecGenerator) toOCISpec(rt *libpod.Runtime, newImage *image.Image) (*spec.Spec, error) {
+ var (
+ inUserNS bool
+ )
+ cgroupPerm := "ro"
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ // Remove the default /dev/shm mount to ensure we overwrite it
+ g.RemoveMount("/dev/shm")
+ g.HostSpecific = true
+ addCgroup := true
+ canMountSys := true
+
+ isRootless := rootless.IsRootless()
+ if isRootless {
+ inUserNS = true
+ }
+ if !s.UserNS.IsHost() {
+ if s.UserNS.IsContainer() || s.UserNS.IsPath() {
+ inUserNS = true
+ }
+ if s.UserNS.IsPrivate() {
+ inUserNS = true
+ }
+ }
+ if inUserNS && s.NetNS.IsHost() {
+ canMountSys = false
+ }
+
+ if s.Privileged && canMountSys {
+ cgroupPerm = "rw"
+ g.RemoveMount("/sys")
+ sysMnt := spec.Mount{
+ Destination: "/sys",
+ Type: "sysfs",
+ Source: "sysfs",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", "rw"},
+ }
+ g.AddMount(sysMnt)
+ } else if !canMountSys {
+ addCgroup = false
+ g.RemoveMount("/sys")
+ r := "ro"
+ if s.Privileged {
+ r = "rw"
+ }
+ sysMnt := spec.Mount{
+ Destination: "/sys",
+ Type: "bind", // should we use a constant for this, like createconfig?
+ Source: "/sys",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
+ }
+ g.AddMount(sysMnt)
+ if !s.Privileged && isRootless {
+ g.AddLinuxMaskedPaths("/sys/kernel")
+ }
+ }
+ gid5Available := true
+ if isRootless {
+ nGids, err := createconfig.GetAvailableGids()
+ if err != nil {
+ return nil, err
+ }
+ gid5Available = nGids >= 5
+ }
+ // When using a different user namespace, check that the GID 5 is mapped inside
+ // the container.
+ if gid5Available && (s.IDMappings != nil && len(s.IDMappings.GIDMap) > 0) {
+ mappingFound := false
+ for _, r := range s.IDMappings.GIDMap {
+ if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
+ mappingFound = true
+ break
+ }
+ }
+ if !mappingFound {
+ gid5Available = false
+ }
+
+ }
+ if !gid5Available {
+ // If we have no GID mappings, the gid=5 default option would fail, so drop it.
+ g.RemoveMount("/dev/pts")
+ devPts := spec.Mount{
+ Destination: "/dev/pts",
+ Type: "devpts",
+ Source: "devpts",
+ Options: []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
+ }
+ g.AddMount(devPts)
+ }
+
+ if inUserNS && s.IpcNS.IsHost() {
+ g.RemoveMount("/dev/mqueue")
+ devMqueue := spec.Mount{
+ Destination: "/dev/mqueue",
+ Type: "bind", // constant ?
+ Source: "/dev/mqueue",
+ Options: []string{"bind", "nosuid", "noexec", "nodev"},
+ }
+ g.AddMount(devMqueue)
+ }
+ if inUserNS && s.PidNS.IsHost() {
+ g.RemoveMount("/proc")
+ procMount := spec.Mount{
+ Destination: "/proc",
+ Type: createconfig.TypeBind,
+ Source: "/proc",
+ Options: []string{"rbind", "nosuid", "noexec", "nodev"},
+ }
+ g.AddMount(procMount)
+ }
+
+ if addCgroup {
+ cgroupMnt := spec.Mount{
+ Destination: "/sys/fs/cgroup",
+ Type: "cgroup",
+ Source: "cgroup",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", cgroupPerm},
+ }
+ g.AddMount(cgroupMnt)
+ }
+ g.SetProcessCwd(s.WorkDir)
+ g.SetProcessArgs(s.Command)
+ g.SetProcessTerminal(s.Terminal)
+
+ for key, val := range s.Annotations {
+ g.AddAnnotation(key, val)
+ }
+ g.AddProcessEnv("container", "podman")
+
+ g.Config.Linux.Resources = s.ResourceLimits
+
+ // Devices
+ if s.Privileged {
+ // If privileged, we need to add all the host devices to the
+ // spec. We do not add the user provided ones because we are
+ // already adding them all.
+ if err := createconfig.AddPrivilegedDevices(&g); err != nil {
+ return nil, err
+ }
+ } else {
+ for _, device := range s.Devices {
+ if err := createconfig.DevicesFromPath(&g, device.Path); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // SECURITY OPTS
+ g.SetProcessNoNewPrivileges(s.NoNewPrivileges)
+
+ if !s.Privileged {
+ g.SetProcessApparmorProfile(s.ApparmorProfile)
+ }
+
+ createconfig.BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), &g)
+
+ for name, val := range s.Env {
+ g.AddProcessEnv(name, val)
+ }
+
+ // TODO rlimits and ulimits needs further refinement by someone more
+ // familiar with the code.
+ //if err := addRlimits(config, &g); err != nil {
+ // return nil, err
+ //}
+
+ // NAMESPACES
+
+ if err := s.pidConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.userConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.networkConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.utsConfigureGenerator(&g, rt); err != nil {
+ return nil, err
+ }
+
+ if err := s.ipcConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.cgroupConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+ configSpec := g.Config
+
+ if err := s.securityConfigureGenerator(&g, newImage); err != nil {
+ return nil, err
+ }
+
+ // BIND MOUNTS
+ configSpec.Mounts = createconfig.SupercedeUserMounts(s.Mounts, configSpec.Mounts)
+ // Process mounts to ensure correct options
+ finalMounts, err := createconfig.InitFSMounts(configSpec.Mounts)
+ if err != nil {
+ return nil, err
+ }
+ configSpec.Mounts = finalMounts
+
+ // Add annotations
+ if configSpec.Annotations == nil {
+ configSpec.Annotations = make(map[string]string)
+ }
+
+ // TODO cidfile is not in specgen; when wiring up cli, we will need to move this out of here
+ // leaving as a reminder
+ //if config.CidFile != "" {
+ // configSpec.Annotations[libpod.InspectAnnotationCIDFile] = config.CidFile
+ //}
+
+ if s.Remove {
+ configSpec.Annotations[libpod.InspectAnnotationAutoremove] = libpod.InspectResponseTrue
+ } else {
+ configSpec.Annotations[libpod.InspectAnnotationAutoremove] = libpod.InspectResponseFalse
+ }
+
+ if len(s.VolumesFrom) > 0 {
+ configSpec.Annotations[libpod.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",")
+ }
+
+ if s.Privileged {
+ configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseTrue
+ } else {
+ configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseFalse
+ }
+
+ // TODO Init might not make it into the specgen and therefore is not available here. We should deal
+ // with this when we wire up the CLI; leaving as a reminder
+ //if s.Init {
+ // configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseTrue
+ //} else {
+ // configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseFalse
+ //}
+
+ return configSpec, nil
+}
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index e22ee598f..e1dfe4dc5 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -2,24 +2,28 @@ package specgen
import (
"net"
+ "syscall"
"github.com/containers/image/v5/manifest"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
)
-// TODO
-// mheon provided this an off the cuff suggestion. Adding it here to retain
-// for history as we implement it. When this struct is implemented, we need
-// to remove the nolints.
-type Namespace struct {
- isHost bool //nolint
- isPath string //nolint
- isContainer string //nolint
- isPod bool //nolint
+// LogConfig describes the logging characteristics for a container
+type LogConfig struct {
+ // Driver is the container's log driver.
+ // Optional.
+ Driver string `json:"driver,omitempty"`
+ // Path is the path the container's logs will be stored at.
+ // Only available if LogDriver is set to "json-file" or "k8s-file".
+ // Optional.
+ Path string `json:"path,omitempty"`
+ // A set of options to accompany the log driver.
+ // Optional.
+ Options map[string]string `json:"options,omitempty"`
}
// ContainerBasicConfig contains the basic parts of a container.
@@ -62,7 +66,7 @@ type ContainerBasicConfig struct {
// If not provided, the default, SIGTERM, will be used.
// Will conflict with Systemd if Systemd is set to "true" or "always".
// Optional.
- StopSignal *uint `json:"stop_signal,omitempty"`
+ StopSignal *syscall.Signal `json:"stop_signal,omitempty"`
// StopTimeout is a timeout between the container's stop signal being
// sent and SIGKILL being sent.
// If not provided, the default will be used.
@@ -70,13 +74,10 @@ type ContainerBasicConfig struct {
// instead.
// Optional.
StopTimeout *uint `json:"stop_timeout,omitempty"`
- // LogDriver is the container's log driver.
- // Optional.
- LogDriver string `json:"log_driver,omitempty"`
- // LogPath is the path the container's logs will be stored at.
- // Only available if LogDriver is set to "json-file" or "k8s-file".
- // Optional.
- LogPath string `json:"log_path,omitempty"`
+ // LogConfiguration describes the logging for a container including
+ // driver, path, and options.
+ // Optional
+ LogConfiguration *LogConfig `json:"log_configuration,omitempty"`
// ConmonPidFile is a path at which a PID file for Conmon will be
// placed.
// If not given, a default location will be used.
@@ -111,12 +112,10 @@ type ContainerBasicConfig struct {
// Namespace is the libpod namespace the container will be placed in.
// Optional.
Namespace string `json:"namespace,omitempty"`
-
// PidNS is the container's PID namespace.
// It defaults to private.
// Mandatory.
PidNS Namespace `json:"pidns,omitempty"`
-
// UtsNS is the container's UTS namespace.
// It defaults to private.
// Must be set to Private to set Hostname.
@@ -128,6 +127,11 @@ type ContainerBasicConfig struct {
// Conflicts with UtsNS if UtsNS is not set to private.
// Optional.
Hostname string `json:"hostname,omitempty"`
+ // Sysctl sets kernel parameters for the container
+ Sysctl map[string]string `json:"sysctl,omitempty"`
+ // Remove indicates if the container should be removed once it has been started
+ // and exits
+ Remove bool `json:"remove"`
}
// ContainerStorageConfig contains information on the storage configuration of a
@@ -175,7 +179,7 @@ type ContainerStorageConfig struct {
// Mandatory.
IpcNS Namespace `json:"ipcns,omitempty"`
// ShmSize is the size of the tmpfs to mount in at /dev/shm, in bytes.
- // Conflicts with ShmSize if ShmSize is not private.
+ // Conflicts with ShmSize if IpcNS is not private.
// Optional.
ShmSize *int64 `json:"shm_size,omitempty"`
// WorkDir is the container's working directory.
@@ -234,6 +238,9 @@ type ContainerSecurityConfig struct {
// will use.
// Optional.
ApparmorProfile string `json:"apparmor_profile,omitempty"`
+ // SeccompPolicy determines which seccomp profile gets applied to
+ // the container. Valid values: empty, default, image.
+ SeccompPolicy string `json:"seccomp_policy,omitempty"`
// SeccompProfilePath is the path to a JSON file containing the
// container's Seccomp profile.
// If not specified, no Seccomp profile will be used.
@@ -252,7 +259,10 @@ type ContainerSecurityConfig struct {
// IDMappings are UID and GID mappings that will be used by user
// namespaces.
// Required if UserNS is private.
- IDMappings storage.IDMappingOptions `json:"idmappings,omitempty"`
+ IDMappings *storage.IDMappingOptions `json:"idmappings,omitempty"`
+ // ReadOnlyFilesystem indicates that everything will be mounted
+ // as read-only
+ ReadOnlyFilesystem bool `json:"read_only_filesystem,omittempty"`
}
// ContainerCgroupConfig contains configuration information about a container's
@@ -260,16 +270,13 @@ type ContainerSecurityConfig struct {
type ContainerCgroupConfig struct {
// CgroupNS is the container's cgroup namespace.
// It defaults to private.
- // Conflicts with NoCgroups if not set to host.
// Mandatory.
CgroupNS Namespace `json:"cgroupns,omitempty"`
- // NoCgroups indicates that the container should not create CGroups.
- // Conflicts with CgroupParent and CgroupNS if CgroupNS is not set to
- // host.
- NoCgroups bool `json:"no_cgroups,omitempty"`
+ // CgroupsMode sets a policy for how cgroups will be created in the
+ // container, including the ability to disable creation entirely.
+ CgroupsMode string `json:"cgroups_mode,omitempty"`
// CgroupParent is the container's CGroup parent.
// If not set, the default for the current cgroup driver will be used.
- // Conflicts with NoCgroups.
// Optional.
CgroupParent string `json:"cgroup_parent,omitempty"`
}
@@ -348,7 +355,7 @@ type ContainerNetworkConfig struct {
// ContainerResourceConfig contains information on container resource limits.
type ContainerResourceConfig struct {
- // ResourceLimits are resource limits to apply to the container.
+ // ResourceLimits are resource limits to apply to the container.
// Can only be set as root on cgroups v1 systems, but can be set as
// rootless as well for cgroups v2.
// Optional.
@@ -365,11 +372,12 @@ type ContainerResourceConfig struct {
// ContainerHealthCheckConfig describes a container healthcheck with attributes
// like command, retries, interval, start period, and timeout.
type ContainerHealthCheckConfig struct {
- HealthConfig manifest.Schema2HealthConfig `json:"healthconfig,omitempty"`
+ HealthConfig *manifest.Schema2HealthConfig `json:"healthconfig,omitempty"`
}
// SpecGenerator creates an OCI spec and Libpod configuration options to create
// a container based on the given configuration.
+// swagger:model SpecGenerator
type SpecGenerator struct {
ContainerBasicConfig
ContainerStorageConfig
@@ -381,19 +389,24 @@ type SpecGenerator struct {
}
// NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs
-func NewSpecGenerator(image, rootfs *string) (*SpecGenerator, error) {
- _ = image
- _ = rootfs
- return &SpecGenerator{}, define.ErrNotImplemented
-}
-
-// Validate verifies that the given SpecGenerator is valid and satisfies required
-// input for creating a container.
-func (s *SpecGenerator) Validate() error {
- return define.ErrNotImplemented
+func NewSpecGenerator(image string) *SpecGenerator {
+ net := ContainerNetworkConfig{
+ NetNS: Namespace{
+ NSMode: Bridge,
+ },
+ }
+ csc := ContainerStorageConfig{Image: image}
+ if rootless.IsRootless() {
+ net.NetNS.NSMode = Slirp
+ }
+ return &SpecGenerator{
+ ContainerStorageConfig: csc,
+ ContainerNetworkConfig: net,
+ }
}
-// MakeContainer creates a container based on the SpecGenerator
-func (s *SpecGenerator) MakeContainer() (*libpod.Container, error) {
- return nil, define.ErrNotImplemented
+// NewSpecGeneratorWithRootfs returns a SpecGenerator struct given a rootfs
+func NewSpecGeneratorWithRootfs(rootfs string) *SpecGenerator {
+ csc := ContainerStorageConfig{Rootfs: rootfs}
+ return &SpecGenerator{ContainerStorageConfig: csc}
}
diff --git a/pkg/specgen/validate.go b/pkg/specgen/validate.go
new file mode 100644
index 000000000..78e4d8ad5
--- /dev/null
+++ b/pkg/specgen/validate.go
@@ -0,0 +1,159 @@
+package specgen
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/pkg/errors"
+)
+
+var (
+ // ErrInvalidSpecConfig indicates that the given SpecGenerator is invalid
+ ErrInvalidSpecConfig error = errors.New("invalid configuration")
+ // SystemDValues describes the only values that SystemD can be
+ SystemDValues = []string{"true", "false", "always"}
+ // ImageVolumeModeValues describes the only values that ImageVolumeMode can be
+ ImageVolumeModeValues = []string{"ignore", "tmpfs", "anonymous"}
+)
+
+func exclusiveOptions(opt1, opt2 string) error {
+ return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2)
+}
+
+// validate verifies that the given SpecGenerator is valid and satisfies required
+// input for creating a container.
+func (s *SpecGenerator) validate(rt *libpod.Runtime) error {
+
+ //
+ // ContainerBasicConfig
+ //
+ // Rootfs and Image cannot both be populated
+ if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+ return errors.Wrap(ErrInvalidSpecConfig, "both image and rootfs cannot be simultaneously")
+ }
+ // Cannot set hostname and utsns
+ if len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() {
+ return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when creating an UTS namespace")
+ }
+ // systemd values must be true, false, or always
+ if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) {
+ return errors.Wrapf(ErrInvalidSpecConfig, "SystemD values must be one of %s", strings.Join(SystemDValues, ","))
+ }
+
+ //
+ // ContainerStorageConfig
+ //
+ // rootfs and image cannot both be set
+ if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+ return exclusiveOptions("rootfs", "image")
+ }
+ // imagevolumemode must be one of ignore, tmpfs, or anonymous if given
+ if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) {
+ return errors.Errorf("ImageVolumeMode values must be one of %s", strings.Join(ImageVolumeModeValues, ","))
+ }
+ // shmsize conflicts with IPC namespace
+ if s.ContainerStorageConfig.ShmSize != nil && !s.ContainerStorageConfig.IpcNS.IsPrivate() {
+ return errors.New("cannot set shmsize when creating an IPC namespace")
+ }
+
+ //
+ // ContainerSecurityConfig
+ //
+ // groups and privileged are exclusive
+ if len(s.Groups) > 0 && s.Privileged {
+ return exclusiveOptions("Groups", "privileged")
+ }
+ // capadd and privileged are exclusive
+ if len(s.CapAdd) > 0 && s.Privileged {
+ return exclusiveOptions("CapAdd", "privileged")
+ }
+ // selinuxprocesslabel and privileged are exclusive
+ if len(s.SelinuxProcessLabel) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxProcessLabel", "privileged")
+ }
+ // selinuxmountlabel and privileged are exclusive
+ if len(s.SelinuxMountLabel) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxMountLabel", "privileged")
+ }
+ // selinuxopts and privileged are exclusive
+ if len(s.SelinuxOpts) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxOpts", "privileged")
+ }
+ // apparmor and privileged are exclusive
+ if len(s.ApparmorProfile) > 0 && s.Privileged {
+ return exclusiveOptions("AppArmorProfile", "privileged")
+ }
+ // a private userns requires idmappings
+ if s.UserNS.IsPrivate() && s.IDMappings == nil {
+ return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when not creating a User namespace")
+ }
+
+ //
+ // ContainerCgroupConfig
+ //
+ //
+ // None for now
+
+ //
+ // ContainerNetworkConfig
+ //
+ if !s.NetNS.IsPrivate() && s.ConfigureNetNS {
+ return errors.New("can only configure network namespace when creating a network a network namespace")
+ }
+ // useimageresolveconf conflicts with dnsserver, dnssearch, dnsoption
+ if s.UseImageResolvConf {
+ if len(s.DNSServer) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSServer")
+ }
+ if len(s.DNSSearch) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSSearch")
+ }
+ if len(s.DNSOption) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSOption")
+ }
+ }
+ // UseImageHosts and HostAdd are exclusive
+ if s.UseImageHosts && len(s.HostAdd) > 0 {
+ return exclusiveOptions("UseImageHosts", "HostAdd")
+ }
+
+ // TODO the specgen does not appear to handle this? Should it
+ //switch config.Cgroup.Cgroups {
+ //case "disabled":
+ // if addedResources {
+ // return errors.New("cannot specify resource limits when cgroups are disabled is specified")
+ // }
+ // configSpec.Linux.Resources = &spec.LinuxResources{}
+ //case "enabled", "no-conmon", "":
+ // // Do nothing
+ //default:
+ // return errors.New("unrecognized option for cgroups; supported are 'default', 'disabled', 'no-conmon'")
+ //}
+
+ // Namespaces
+ if err := s.UtsNS.validate(); err != nil {
+ return err
+ }
+ if err := s.IpcNS.validate(); err != nil {
+ return err
+ }
+ if err := s.NetNS.validate(); err != nil {
+ return err
+ }
+ if err := s.PidNS.validate(); err != nil {
+ return err
+ }
+ if err := s.CgroupNS.validate(); err != nil {
+ return err
+ }
+ if err := s.UserNS.validate(); err != nil {
+ return err
+ }
+
+ // The following are defaults as needed by container creation
+ if len(s.WorkDir) < 1 {
+ s.WorkDir = "/"
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..be0791620
--- /dev/null
+++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The Containers Storage Project Community Code of Conduct
+
+The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 1b69d6060..09937303b 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -54,19 +54,19 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.
containers-storage: $(sources) ## build using gc on the host
$(GO_BUILD) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
-layers_ffjson.go: layers.go
+layers_ffjson.go: $(FFJSON) layers.go
$(RM) $@
$(FFJSON) layers.go
-images_ffjson.go: images.go
+images_ffjson.go: $(FFJSON) images.go
$(RM) $@
$(FFJSON) images.go
-containers_ffjson.go: containers.go
+containers_ffjson.go: $(FFJSON) containers.go
$(RM) $@
$(FFJSON) containers.go
-pkg/archive/archive_ffjson.go: pkg/archive/archive.go
+pkg/archive/archive_ffjson.go: $(FFJSON) pkg/archive/archive.go
$(RM) $@
$(FFJSON) pkg/archive/archive.go
@@ -118,6 +118,9 @@ validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor u
install.tools:
make -C tests/tools
+$(FFJSON):
+ make -C tests/tools build/ffjson
+
install.docs: docs
make -C docs install
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 98e863cdf..15b989e39 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.15.8
+1.16.0
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index ba40f9c14..84dd86a20 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -7,10 +7,10 @@ require (
github.com/Microsoft/hcsshim v0.8.7
github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
github.com/docker/go-units v0.4.0
- github.com/klauspost/compress v1.9.8
+ github.com/klauspost/compress v1.10.0
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/pgzip v1.2.1
- github.com/mattn/go-shellwords v1.0.9
+ github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/runc v1.0.0-rc9
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index e2785594d..c2029949a 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -79,6 +79,8 @@ github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug2
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
+github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -93,6 +95,8 @@ github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWeP
github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 0dde97c18..e1954ad04 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
package storage
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
new file mode 100644
index 000000000..3a1095226
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -0,0 +1,2156 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: layers.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshal bytes to json - template
+func (j *DiffOptions) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Compression != nil {
+ buf.WriteString(`{"Compression":`)
+ fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0)
+ } else {
+ buf.WriteString(`{"Compression":null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtDiffOptionsbase = iota
+ ffjtDiffOptionsnosuchkey
+
+ ffjtDiffOptionsCompression
+)
+
+var ffjKeyDiffOptionsCompression = []byte("Compression")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *DiffOptions) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtDiffOptionsbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtDiffOptionsCompression:
+ goto handle_Compression
+
+ case ffjtDiffOptionsnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Compression:
+
+ /* handler: j.Compression type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.Compression = nil
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := archive.Compression(tval)
+ j.Compression = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *Layer) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Parent) != 0 {
+ buf.WriteString(`"parent":`)
+ fflib.WriteJsonString(buf, string(j.Parent))
+ buf.WriteByte(',')
+ }
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.MountLabel) != 0 {
+ buf.WriteString(`"mountlabel":`)
+ fflib.WriteJsonString(buf, string(j.MountLabel))
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.CompressedDigest) != 0 {
+ buf.WriteString(`"compressed-diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.CompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.CompressedSize != 0 {
+ buf.WriteString(`"compressed-size":`)
+ fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UncompressedDigest) != 0 {
+ buf.WriteString(`"diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.UncompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.UncompressedSize != 0 {
+ buf.WriteString(`"diff-size":`)
+ fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if j.CompressionType != 0 {
+ buf.WriteString(`"compression":`)
+ fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UIDs) != 0 {
+ buf.WriteString(`"uidset":`)
+ if j.UIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDs) != 0 {
+ buf.WriteString(`"gidset":`)
+ if j.GIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.UIDMap) != 0 {
+ buf.WriteString(`"uidmap":`)
+ if j.UIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDMap) != 0 {
+ buf.WriteString(`"gidmap":`)
+ if j.GIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtLayerbase = iota
+ ffjtLayernosuchkey
+
+ ffjtLayerID
+
+ ffjtLayerNames
+
+ ffjtLayerParent
+
+ ffjtLayerMetadata
+
+ ffjtLayerMountLabel
+
+ ffjtLayerCreated
+
+ ffjtLayerCompressedDigest
+
+ ffjtLayerCompressedSize
+
+ ffjtLayerUncompressedDigest
+
+ ffjtLayerUncompressedSize
+
+ ffjtLayerCompressionType
+
+ ffjtLayerUIDs
+
+ ffjtLayerGIDs
+
+ ffjtLayerFlags
+
+ ffjtLayerUIDMap
+
+ ffjtLayerGIDMap
+)
+
+var ffjKeyLayerID = []byte("id")
+
+var ffjKeyLayerNames = []byte("names")
+
+var ffjKeyLayerParent = []byte("parent")
+
+var ffjKeyLayerMetadata = []byte("metadata")
+
+var ffjKeyLayerMountLabel = []byte("mountlabel")
+
+var ffjKeyLayerCreated = []byte("created")
+
+var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest")
+
+var ffjKeyLayerCompressedSize = []byte("compressed-size")
+
+var ffjKeyLayerUncompressedDigest = []byte("diff-digest")
+
+var ffjKeyLayerUncompressedSize = []byte("diff-size")
+
+var ffjKeyLayerCompressionType = []byte("compression")
+
+var ffjKeyLayerUIDs = []byte("uidset")
+
+var ffjKeyLayerGIDs = []byte("gidset")
+
+var ffjKeyLayerFlags = []byte("flags")
+
+var ffjKeyLayerUIDMap = []byte("uidmap")
+
+var ffjKeyLayerGIDMap = []byte("gidmap")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *Layer) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtLayerbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'd':
+
+ if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'g':
+
+ if bytes.Equal(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtLayerID:
+ goto handle_ID
+
+ case ffjtLayerNames:
+ goto handle_Names
+
+ case ffjtLayerParent:
+ goto handle_Parent
+
+ case ffjtLayerMetadata:
+ goto handle_Metadata
+
+ case ffjtLayerMountLabel:
+ goto handle_MountLabel
+
+ case ffjtLayerCreated:
+ goto handle_Created
+
+ case ffjtLayerCompressedDigest:
+ goto handle_CompressedDigest
+
+ case ffjtLayerCompressedSize:
+ goto handle_CompressedSize
+
+ case ffjtLayerUncompressedDigest:
+ goto handle_UncompressedDigest
+
+ case ffjtLayerUncompressedSize:
+ goto handle_UncompressedSize
+
+ case ffjtLayerCompressionType:
+ goto handle_CompressionType
+
+ case ffjtLayerUIDs:
+ goto handle_UIDs
+
+ case ffjtLayerGIDs:
+ goto handle_GIDs
+
+ case ffjtLayerFlags:
+ goto handle_Flags
+
+ case ffjtLayerUIDMap:
+ goto handle_UIDMap
+
+ case ffjtLayerGIDMap:
+ goto handle_GIDMap
+
+ case ffjtLayernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Parent:
+
+ /* handler: j.Parent type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Parent = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountLabel:
+
+ /* handler: j.MountLabel type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountLabel = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedDigest:
+
+ /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.CompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedSize:
+
+ /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedDigest:
+
+ /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.UncompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedSize:
+
+ /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.UncompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressionType:
+
+ /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressionType = archive.Compression(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDs:
+
+ /* handler: j.UIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDs = nil
+ } else {
+
+ j.UIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJUIDs = uint32(tval)
+
+ }
+ }
+
+ j.UIDs = append(j.UIDs, tmpJUIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDs:
+
+ /* handler: j.GIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDs = nil
+ } else {
+
+ j.GIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJGIDs = uint32(tval)
+
+ }
+ }
+
+ j.GIDs = append(j.GIDs, tmpJGIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDMap:
+
+ /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDMap = nil
+ } else {
+
+ j.UIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJUIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.UIDMap = append(j.UIDMap, tmpJUIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDMap:
+
+ /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDMap = nil
+ } else {
+
+ j.GIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJGIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.GIDMap = append(j.GIDMap, tmpJGIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerMountPoint) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"path":`)
+ fflib.WriteJsonString(buf, string(j.MountPoint))
+ buf.WriteString(`,"count":`)
+ fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtlayerMountPointbase = iota
+ ffjtlayerMountPointnosuchkey
+
+ ffjtlayerMountPointID
+
+ ffjtlayerMountPointMountPoint
+
+ ffjtlayerMountPointMountCount
+)
+
+var ffjKeylayerMountPointID = []byte("id")
+
+var ffjKeylayerMountPointMountPoint = []byte("path")
+
+var ffjKeylayerMountPointMountCount = []byte("count")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerMountPoint) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerMountPointbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerMountPointID:
+ goto handle_ID
+
+ case ffjtlayerMountPointMountPoint:
+ goto handle_MountPoint
+
+ case ffjtlayerMountPointMountCount:
+ goto handle_MountCount
+
+ case ffjtlayerMountPointnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountPoint:
+
+ /* handler: j.MountPoint type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountPoint = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountCount:
+
+ /* handler: j.MountCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.MountCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtlayerStorebase = iota
+ ffjtlayerStorenosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *simpleGetCloser) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtsimpleGetCloserbase = iota
+ ffjtsimpleGetClosernosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *simpleGetCloser) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtsimpleGetCloserbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtsimpleGetClosernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
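The generated MarshalJSON/UnmarshalJSON methods above let storage.Layer encode and decode without going through encoding/json's reflection path. A minimal round-trip sketch, assuming the caller imports containers/storage directly (the layer ID and name values are placeholders, not data from this change):

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	in := storage.Layer{
		ID:    "0123456789abcdef",        // placeholder layer ID
		Names: []string{"example-layer"}, // placeholder name
	}

	// MarshalJSON uses the ffjson-generated writer; empty optional fields
	// (digests, sizes, ID maps) are omitted from the output.
	data, err := in.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	// UnmarshalJSON drives the generated FFLexer state machine.
	var out storage.Layer
	if err := out.UnmarshalJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(out.ID == in.ID) // true
}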
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
index c001fbecb..d28ba9d69 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
@@ -1,23 +1,96 @@
-// +build linux
-
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
+ "errors"
"os"
-
- "github.com/containers/storage/pkg/idtools"
+ "path/filepath"
+ "strings"
)
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
- uid := os.Getuid()
- usr, err := idtools.LookupUID(uid)
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+ return xdgRuntimeDir, nil
+ }
+ return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files whose sticky bit was set.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ runtimeDir, err := GetRuntimeDir()
if err != nil {
- return "", err
+ // ignore error if runtimeDir is empty
+ return nil, nil
+ }
+ runtimeDir, err = filepath.Abs(runtimeDir)
+ if err != nil {
+ return nil, err
+ }
+ var sticked []string
+ for _, f := range files {
+ f, err = filepath.Abs(f)
+ if err != nil {
+ return sticked, err
+ }
+ if strings.HasPrefix(f, runtimeDir+"/") {
+ if err = stick(f); err != nil {
+ return sticked, err
+ }
+ sticked = append(sticked, f)
+ }
+ }
+ return sticked, nil
+}
+
+func stick(f string) error {
+ st, err := os.Stat(f)
+ if err != nil {
+ return err
+ }
+ m := st.Mode()
+ m |= os.ModeSticky
+ return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and a nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and a nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
- return usr.Home, nil
+ return filepath.Join(home, ".config"), nil
}
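The rewritten homedir helpers consult the XDG base-directory variables and only fall back to $HOME for the data and config locations. A small usage sketch on Linux (the /tmp fallback is a caller-side choice for illustration, not behaviour of the package):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// XDG_RUNTIME_DIR has no fallback; the caller decides what to do on error.
	runtimeDir, err := homedir.GetRuntimeDir()
	if err != nil {
		runtimeDir = "/tmp" // hypothetical fallback chosen by this caller
	}

	// These fall back to $HOME/.local/share and $HOME/.config and only
	// fail when HOME is unset as well.
	dataHome, _ := homedir.GetDataHome()
	configHome, _ := homedir.GetConfigHome()

	fmt.Println(runtimeDir, dataHome, configHome)
}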
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index 6b96b856f..f7bcfb878 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -2,12 +2,29 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"errors"
)
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
- return "", errors.New("homedir.GetStatic() is not supported on this system")
+// GetRuntimeDir is unsupported on non-Linux systems.
+func GetRuntimeDir() (string, error) {
+ return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-Linux systems.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
+
+// GetDataHome is unsupported on non-Linux systems.
+func GetDataHome() (string, error) {
+ return "", errors.New("homedir.GetDataHome() is not supported on this system")
+}
+
+// GetConfigHome is unsupported on non-Linux systems.
+func GetConfigHome() (string, error) {
+ return "", errors.New("homedir.GetConfigHome() is not supported on this system")
}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index f2a20ea8f..dcadb7e8d 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -2,10 +2,12 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"os"
-
- "github.com/opencontainers/runc/libcontainer/user"
+ "os/user"
)
// Key returns the env var name for the user's home dir based on
@@ -17,11 +19,16 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
home := os.Getenv(Key())
if home == "" {
- if u, err := user.CurrentUser(); err == nil {
- return u.Home
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
}
return home
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
index fafdb2bbf..4f2615ed3 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -1,5 +1,8 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"os"
)
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 1ae728a61..372bee732 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -5,9 +5,6 @@ package reexec
import (
"context"
"os/exec"
- "syscall"
-
- "golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
@@ -16,28 +13,20 @@ func Self() string {
return "/proc/self/exe"
}
-// Command returns *exec.Cmd which has Path as current binary. Also it setting
-// SysProcAttr.Pdeathsig to SIGTERM.
+// Command returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
cmd := exec.Command(Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
-// CommandContext returns *exec.Cmd which has Path as current binary, and also
-// sets SysProcAttr.Pdeathsig to SIGTERM.
+// CommandContext returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
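Dropping the SysProcAttr block means reexec.Command no longer asks the kernel to SIGTERM the child when the parent dies; a caller that still wants that behaviour has to set it explicitly. A hedged sketch, where "storage-child" stands in for whatever name the caller registered with reexec.Register:

package main

import (
	"syscall"

	"github.com/containers/storage/pkg/reexec"
	"golang.org/x/sys/unix"
)

func main() {
	// Runs /proc/self/exe with os.Args[0] set to "storage-child".
	cmd := reexec.Command("storage-child")

	// Opt back in to the old terminate-with-parent behaviour if needed.
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: unix.SIGTERM}

	if err := cmd.Run(); err != nil {
		panic(err)
	}
}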
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index b84be4424..d978c476d 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/storage/pkg/archive"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
@@ -3275,9 +3276,9 @@ const defaultConfigFile = "/etc/containers/storage.conf"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
- home, err := homeDir()
- if err != nil {
- return "", errors.Wrapf(err, "cannot determine users homedir")
+ home := homedir.Get()
+ if home == "" {
+ return "", errors.New("cannot determine user's homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
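DefaultConfigFile now resolves the rootless path with homedir.Get instead of the removed homeDir helper. A minimal sketch of a call site (using the effective UID to decide rootless is an assumption about the caller, not part of this change):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	// Rootless users get ~/.config/containers/storage.conf,
	// root keeps /etc/containers/storage.conf.
	path, err := storage.DefaultConfigFile(os.Geteuid() != 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(path)
}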
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 28e0a8d6d..7e4b27d0f 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -4,12 +4,12 @@ import (
"fmt"
"os"
"os/exec"
- "os/user"
"path/filepath"
"strconv"
"strings"
"github.com/BurntSushi/toml"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -82,9 +82,8 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
func getRootlessRuntimeDir(rootlessUID int) (string, error) {
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
-
- if runtimeDir != "" {
+ runtimeDir, err := homedir.GetRuntimeDir()
+ if err == nil {
return runtimeDir, nil
}
tmpDir := fmt.Sprintf("/run/user/%d", rootlessUID)
@@ -98,8 +97,8 @@ func getRootlessRuntimeDir(rootlessUID int) (string, error) {
} else {
return tmpDir, nil
}
- home, err := homeDir()
- if err != nil {
+ home := homedir.Get()
+ if home == "" {
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
}
resolvedHome, err := filepath.EvalSymlinks(home)
@@ -117,20 +116,23 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
return "", "", err
}
- dataDir := os.Getenv("XDG_DATA_HOME")
- if dataDir == "" {
- home, err := homeDir()
- if err != nil {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
- }
- // runc doesn't like symlinks in the rootfs path, and at least
- // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- dataDir = filepath.Join(resolvedHome, ".local", "share")
+ dataDir, err := homedir.GetDataHome()
+ if err == nil {
+ return dataDir, rootlessRuntime, nil
+ }
+
+ home := homedir.Get()
+ if home == "" {
+ return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "cannot resolve %s", home)
}
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+
return dataDir, rootlessRuntime, nil
}
@@ -246,15 +248,3 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
}
return storageOpts, nil
}
-
-func homeDir() (string, error) {
- home := os.Getenv("HOME")
- if home == "" {
- usr, err := user.Current()
- if err != nil {
- return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- home = usr.HomeDir
- }
- return home, nil
-}
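With homeDir gone, the rootless lookups funnel through the homedir package: XDG_RUNTIME_DIR first, then /run/user/<uid>, then a directory under the symlink-resolved home. A sketch of the exported entry point:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	// Returns the per-user runtime directory used for rootless storage state.
	dir, err := storage.GetRootlessRuntimeDir(os.Getuid())
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}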
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index b0a470f92..3d2fdcd77 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -42,10 +42,10 @@ const (
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
- bTableBits = 18 // Bits used in the big tables
- bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxMatchOffset * 10 // Size to preallocate for history.
- bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
+ bTableBits = 18 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 20 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
const (
@@ -210,16 +210,14 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
// Reset the encoding table.
func (e *fastGen) Reset() {
- if cap(e.hist) < int(maxMatchOffset*8) {
- l := maxMatchOffset * 8
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
}
- // We offset current position so everything will be out of reach
- e.cur += maxMatchOffset + int32(len(e.hist))
e.hist = e.hist[:0]
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 9feea87a3..56ee6dc8b 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -177,6 +177,11 @@ func (w *huffmanBitWriter) flush() {
w.nbits = 0
return
}
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
n := w.nbytes
for w.nbits != 0 {
w.bytes[n] = byte(w.bits)
@@ -594,8 +599,8 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
tokens.AddEOB()
}
- // We cannot reuse pure huffman table.
- if w.lastHuffMan && w.lastHeader > 0 {
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
// We will not try to reuse.
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 20de8f11f..102fc74c7 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -14,6 +16,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 7c824431e..dc6b1d314 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -16,6 +18,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index 4153d24c9..1a3ff9b6b 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastEncL3
type fastEncL3 struct {
fastGen
@@ -13,6 +15,10 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
index c689ac771..f3ecc9c4d 100644
--- a/vendor/github.com/klauspost/compress/flate/level4.go
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -13,7 +13,9 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
-
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 14a235612..4e3916825 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -13,6 +13,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index cad0c7df7..00a311977 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -13,6 +13,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index a47051197..53e899124 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -8,6 +8,8 @@ import (
const (
maxStatelessBlock = math.MaxInt16
+ // dictionary will be taken from maxStatelessBlock, so limit it.
+ maxStatelessDict = 8 << 10
slTableBits = 13
slTableSize = 1 << slTableBits
@@ -25,11 +27,11 @@ func (s *statelessWriter) Close() error {
}
s.closed = true
// Emit EOF block
- return StatelessDeflate(s.dst, nil, true)
+ return StatelessDeflate(s.dst, nil, true, nil)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false)
+ err = StatelessDeflate(s.dst, p, false, nil)
if err != nil {
return 0, err
}
@@ -59,7 +61,10 @@ var bitWriterPool = sync.Pool{
// StatelessDeflate allows to compress directly to a Writer without retaining state.
// When returning everything will be flushed.
-func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending a nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
@@ -76,35 +81,53 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
return bw.err
}
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
for len(in) > 0 {
todo := in
- if len(todo) > maxStatelessBlock {
- todo = todo[:maxStatelessBlock]
+ if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
}
in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
// Compress
- statelessEnc(&dst, todo)
+ statelessEnc(&dst, todo, int16(len(dict)))
isEof := eof && len(in) == 0
if dst.n == 0 {
- bw.writeStoredHeader(len(todo), isEof)
+ bw.writeStoredHeader(len(uncompressed), isEof)
if bw.err != nil {
return bw.err
}
- bw.writeBytes(todo)
- } else if int(dst.n) > len(todo)-len(todo)>>4 {
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
// If we removed less than 1/16th, huffman compress the block.
- bw.writeBlockHuff(isEof, todo, false)
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
- bw.writeBlockDynamic(&dst, isEof, todo, false)
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ dict = todo[len(todo)-maxStatelessDict:]
+ dst.Reset()
}
if bw.err != nil {
return bw.err
}
- dst.Reset()
}
if !eof {
- // Align.
+ // Align, only a stored block can do that.
bw.writeStoredHeader(0, false)
}
bw.flush()
@@ -130,7 +153,7 @@ func load6416(b []byte, i int16) uint64 {
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
-func statelessEnc(dst *tokens, src []byte) {
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
@@ -144,15 +167,23 @@ func statelessEnc(dst *tokens, src []byte) {
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
- dst.n = uint16(len(src))
+ dst.n = 0
return
}
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
- s := int16(1)
- nextEmit := int16(0)
+ s := startAt + 1
+ nextEmit := startAt
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
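The hunks above change the exported StatelessDeflate signature to take an optional dictionary of up to 8KB that is treated as data preceding the block. A minimal usage sketch (not part of this patch; the sample data is made up) for the new four-argument form:

package main

import (
    "bytes"
    "log"

    "github.com/klauspost/compress/flate"
)

func main() {
    var out bytes.Buffer
    first := []byte("hello hello hello ")
    second := []byte("hello again")

    // First block: no preceding data, so no dictionary.
    if err := flate.StatelessDeflate(&out, first, false, nil); err != nil {
        log.Fatal(err)
    }
    // Second block: pass the previously written data as the dictionary so
    // back-references can reach into it (the encoder truncates anything
    // beyond the last 8KB).
    if err := flate.StatelessDeflate(&out, second, true, first); err != nil {
        log.Fatal(err)
    }
    log.Printf("compressed %d bytes", out.Len())
}

Because the last call passes eof=true, the concatenated output is a complete deflate stream that decodes back to first followed by second.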
diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go
deleted file mode 100644
index ddcbf229e..000000000
--- a/vendor/github.com/mattn/go-shellwords/util_go15.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !go1.6
-
-package shellwords
-
-import (
- "os"
- "os/exec"
- "runtime"
- "strings"
-)
-
-func shellRun(line, dir string) (string, error) {
- var b []byte
- var err error
- var cmd *exec.Cmd
- if runtime.GOOS == "windows" {
- cmd = exec.Command(os.Getenv("COMSPEC"), "/c", line)
- } else {
- cmd = exec.Command(os.Getenv("SHELL"), "-c", line)
- }
- if dir != "" {
- cmd.Dir = dir
- }
- b, err = cmd.Output()
- if err != nil {
- return "", err
- }
- return strings.TrimSpace(string(b)), nil
-}
diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go
index 3aef2c4d7..988fc9ed2 100644
--- a/vendor/github.com/mattn/go-shellwords/util_posix.go
+++ b/vendor/github.com/mattn/go-shellwords/util_posix.go
@@ -1,4 +1,4 @@
-// +build !windows,go1.6
+// +build !windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("SHELL")
+ var shell string
+ if shell = os.Getenv("SHELL"); shell == "" {
+ shell = "/bin/sh"
+ }
cmd := exec.Command(shell, "-c", line)
if dir != "" {
cmd.Dir = dir
diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go
index cda685091..20546737c 100644
--- a/vendor/github.com/mattn/go-shellwords/util_windows.go
+++ b/vendor/github.com/mattn/go-shellwords/util_windows.go
@@ -1,4 +1,4 @@
-// +build windows,go1.6
+// +build windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("COMSPEC")
+ var shell string
+ if shell = os.Getenv("COMSPEC"); shell == "" {
+ shell = "cmd"
+ }
cmd := exec.Command(shell, "/c", line)
if dir != "" {
cmd.Dir = dir
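The go-shellwords change drops the go1.6 build-tag split and falls back to /bin/sh (or cmd on Windows) when SHELL/COMSPEC is unset. A small sketch of how that surfaces through the library's public API (the command string is made up; ParseBacktick is the option that routes through shellRun):

package main

import (
    "fmt"
    "log"

    shellwords "github.com/mattn/go-shellwords"
)

func main() {
    p := shellwords.NewParser()
    p.ParseBacktick = true // backtick substitution is what ends up calling shellRun

    // With the fallback above, this still works when $SHELL is unset in the
    // environment: the subcommand runs via /bin/sh instead of failing to exec
    // an empty command name.
    args, err := p.Parse("echo `echo podman`")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(args) // [echo podman]
}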
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index e0364e9e7..bf89ecd21 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -32,7 +32,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -160,7 +161,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
return False(t, value, append([]interface{}{msg}, args...)...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -267,7 +269,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -325,14 +327,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() not accept.
//
@@ -369,6 +363,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
@@ -379,6 +384,15 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool
return Nil(t, object, append([]interface{}{msg}, args...)...)
}
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -392,6 +406,15 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
return NoError(t, err, append([]interface{}{msg}, args...)...)
}
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -462,6 +485,19 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -491,6 +527,18 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool
return Panics(t, f, append([]interface{}{msg}, args...)...)
}
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -557,6 +605,14 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 26830403a..75ecdcaa2 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -53,7 +53,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
return Containsf(a.t, s, contains, msg, args...)
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -61,7 +62,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
return DirExists(a.t, path, msgAndArgs...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -309,7 +311,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
return Falsef(a.t, value, msg, args...)
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -317,7 +320,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
return FileExists(a.t, path, msgAndArgs...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -521,7 +525,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other.
//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -563,7 +567,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -639,22 +643,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
return JSONEqf(a.t, expected, actual, msg, args...)
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -727,6 +715,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...)
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@@ -747,6 +757,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b
return Nilf(a.t, object, msg, args...)
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -773,6 +801,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
return NoErrorf(a.t, err, msg, args...)
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -913,6 +959,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
return NotRegexpf(a.t, rx, str, msg, args...)
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -961,6 +1033,30 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1103,6 +1199,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
// Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 044da8b01..bdd81389a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -11,6 +11,7 @@ import (
"reflect"
"regexp"
"runtime"
+ "runtime/debug"
"strings"
"time"
"unicode"
@@ -21,7 +22,7 @@ import (
yaml "gopkg.in/yaml.v2"
)
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
@@ -351,6 +352,19 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
}
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
// Same asserts that two pointers reference the same object.
//
// assert.Same(t, ptr1, ptr2)
@@ -362,18 +376,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper()
}
- expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual)
- if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
- return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
- }
-
- expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
- if expectedType != actualType {
- return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
- expectedType, actualType), msgAndArgs...)
- }
-
- if expected != actual {
+ if !samePointers(expected, actual) {
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@@ -382,6 +385,42 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
return true
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
+
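Same and the new NotSame now both go through samePointers: two values are "same" only if both are pointers, of identical type, and equal (same address). An illustrative test sketch (package name and variables are hypothetical):

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestSameness(t *testing.T) {
    x := 1
    p, q := &x, &x

    assert.Same(t, p, q)              // same type, identical address: passes
    assert.NotSame(t, p, new(int))    // same type, different address: passes
    assert.NotSame(t, p, &struct{}{}) // different pointer types are never "same"
}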
// formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user.
//
@@ -393,9 +432,11 @@ func formatUnequalValues(expected, actual interface{}) (e string, a string) {
return fmt.Sprintf("%T(%#v)", expected, expected),
fmt.Sprintf("%T(%#v)", actual, actual)
}
-
- return fmt.Sprintf("%#v", expected),
- fmt.Sprintf("%#v", actual)
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
}
// EqualValues asserts that two objects are equal or convertable to the same types
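The formatUnequalValues change above special-cases time.Duration so failure messages print human-readable durations instead of %#v integers. A deliberately failing assertion, shown only to illustrate the new message (package name is hypothetical):

package example

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestDurationFailureMessage(t *testing.T) {
    // With the time.Duration case above, the failure reports
    // expected: 1s / actual: 2s rather than raw integer representations.
    assert.Equal(t, time.Second, 2*time.Second)
}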
@@ -901,15 +942,17 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}) {
+func didPanic(f PanicTestFunc) (bool, interface{}, string) {
didPanic := false
var message interface{}
+ var stack string
func() {
defer func() {
if message = recover(); message != nil {
didPanic = true
+ stack = string(debug.Stack())
}
}()
@@ -918,7 +961,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) {
}()
- return didPanic, message
+ return didPanic, message, stack
}
@@ -930,7 +973,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper()
}
- if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
}
@@ -946,12 +989,34 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr
h.Helper()
}
- funcDidPanic, panicValue := didPanic(f)
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
if !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
}
if panicValue != expected {
- return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
}
return true
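PanicsWithError, added above, only passes when the recovered panic value is an error whose Error() text equals errString. A short test sketch (values are made up):

package example

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestPanicsWithError(t *testing.T) {
    // Passes: the recovered value is an error and its message matches.
    assert.PanicsWithError(t, "boom", func() { panic(errors.New("boom")) })

    // Would fail: the panic value is a plain string, not an error,
    // even though the text matches.
    // assert.PanicsWithError(t, "boom", func() { panic("boom") })
}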
@@ -965,8 +1030,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper()
}
- if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
}
return true
@@ -1026,7 +1091,7 @@ func toFloat(x interface{}) (float64, bool) {
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1314,7 +1379,8 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
return true
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1332,7 +1398,24 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1350,6 +1433,25 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return true
+ }
+ return true
+ }
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
// JSONEq asserts that two JSON strings are equivalent.
//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
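The new NoFileExists/NoDirExists helpers above only fail when the path exists and is of the named kind; a missing path, a stat error, or the other kind of entry passes. A usage sketch with hypothetical paths:

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNothingLeftBehind(t *testing.T) {
    // Fails only if the path exists and is a regular file.
    assert.NoFileExists(t, "/tmp/example.lock")
    // Fails only if the path exists and is a directory.
    assert.NoDirExists(t, "/tmp/example-workdir")
}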
@@ -1439,15 +1541,6 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff
}
-// validateEqualArgs checks whether provided arguments can be safely used in the
-// Equal/NotEqual functions.
-func validateEqualArgs(expected, actual interface{}) error {
- if isFunction(expected) || isFunction(actual) {
- return errors.New("cannot take func type as argument")
- }
- return nil
-}
-
func isFunction(arg interface{}) bool {
if arg == nil {
return false
@@ -1475,24 +1568,59 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
h.Helper()
}
+ ch := make(chan bool, 1)
+
timer := time.NewTimer(waitFor)
- ticker := time.NewTicker(tick)
- checkPassed := make(chan bool)
defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
defer ticker.Stop()
- defer close(checkPassed)
- for {
+
+ for tick := ticker.C; ; {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case result := <-checkPassed:
- if result {
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
return true
}
- case <-ticker.C:
- go func() {
- checkPassed <- condition()
- }()
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
}
}
}
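The rewritten Eventually and the new Never poll the condition in a goroutine once per tick, disabling the tick channel while a check is in flight so a slow condition cannot pile up goroutines. A small usage sketch (package name, timings and the done channel are illustrative):

package example

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestPolling(t *testing.T) {
    done := make(chan struct{})
    go func() {
        time.Sleep(50 * time.Millisecond)
        close(done)
    }()

    // Eventually polls every 10ms and passes as soon as the condition
    // returns true within the 1s deadline.
    assert.Eventually(t, func() bool {
        select {
        case <-done:
            return true
        default:
            return false
        }
    }, time.Second, 10*time.Millisecond)

    // Never requires the condition to stay false for the whole window.
    assert.Never(t, func() bool { return false }, 100*time.Millisecond, 10*time.Millisecond)
}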
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
index 9ad56851d..df189d234 100644
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
index ac71d4058..1dcb2338c 100644
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index c5903f5db..cf6c7b566 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -66,7 +66,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
t.FailNow()
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -77,7 +78,8 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow()
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -275,12 +277,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) {
//
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
- return
- }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
+ return
+ }
t.FailNow()
}
@@ -289,12 +291,12 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
//
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
- return
- }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
+ return
+ }
t.FailNow()
}
@@ -394,7 +396,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
t.FailNow()
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -405,7 +408,8 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow()
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -660,7 +664,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -717,7 +721,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -820,28 +824,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
t.FailNow()
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEqf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -932,6 +914,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
t.FailNow()
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Never(t, condition, waitFor, tick, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Neverf(t, condition, waitFor, tick, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Nil asserts that the specified object is nil.
//
// assert.Nil(t, err)
@@ -958,6 +968,30 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
t.FailNow()
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoDirExists(t, path, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoDirExistsf(t, path, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -990,6 +1024,30 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
t.FailNow()
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoFileExists(t, path, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoFileExistsf(t, path, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -1166,6 +1224,38 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
t.FailNow()
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotSame(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotSamef(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -1229,6 +1319,36 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
t.FailNow()
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.PanicsWithError(t, errString, f, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.PanicsWithErrorf(t, errString, f, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1410,6 +1530,28 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
t.FailNow()
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.YAMLEqf(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Zero asserts that i is the zero value for its type.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 804fae035..5aac226df 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -54,7 +54,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
Containsf(a.t, s, contains, msg, args...)
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -62,7 +63,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
DirExists(a.t, path, msgAndArgs...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -310,7 +312,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
Falsef(a.t, value, msg, args...)
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -318,7 +321,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
FileExists(a.t, path, msgAndArgs...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -522,7 +526,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other.
//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -564,7 +568,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -640,22 +644,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
JSONEqf(a.t, expected, actual, msg, args...)
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -728,6 +716,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
Lessf(a.t, e1, e2, msg, args...)
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@@ -748,6 +758,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
Nilf(a.t, object, msg, args...)
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoDirExistsf(a.t, path, msg, args...)
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -774,6 +802,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
NoErrorf(a.t, err, msg, args...)
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoFileExistsf(a.t, path, msg, args...)
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -914,6 +960,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
NotRegexpf(a.t, rx, str, msg, args...)
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotSamef(a.t, expected, actual, msg, args...)
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -962,6 +1034,30 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1104,6 +1200,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
// Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
index 6b85c5ece..91772dfeb 100644
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ b/vendor/github.com/stretchr/testify/require/requirements.go
@@ -26,4 +26,4 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{})
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})
-//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 814ea264e..9fd805077 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -141,7 +141,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.15.8
+# github.com/containers/storage v1.16.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -313,7 +313,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.9.8
+# github.com/klauspost/compress v1.10.0
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -324,7 +324,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-shellwords v1.0.9
+# github.com/mattn/go-shellwords v1.0.10
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil
@@ -470,7 +470,7 @@ github.com/sirupsen/logrus/hooks/syslog
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
-# github.com/stretchr/testify v1.4.0
+# github.com/stretchr/testify v1.5.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2