-rw-r--r--  Makefile  8
-rw-r--r--  cmd/podman/login.go  14
-rw-r--r--  cmd/podman/logout.go  12
-rw-r--r--  cmd/podman/logs.go  2
-rw-r--r--  cmd/podman/service.go  112
-rw-r--r--  cmd/podman/shared/container.go  8
-rw-r--r--  cmd/podman/shared/create.go  3
-rw-r--r--  docs/source/markdown/libpod.conf.5.md  3
-rw-r--r--  docs/source/markdown/podman-login.1.md  7
-rw-r--r--  docs/source/markdown/podman-logout.1.md  3
-rw-r--r--  docs/source/markdown/podman-logs.1.md  2
-rw-r--r--  docs/source/markdown/podman-ps.1.md  3
-rw-r--r--  go.mod  4
-rw-r--r--  go.sum  9
-rw-r--r--  libpod/config/config.go  166
-rw-r--r--  libpod/config/config_test.go  16
-rw-r--r--  libpod/config/merge.go  183
-rw-r--r--  libpod/config/merge_test.go  157
-rw-r--r--  libpod/container_log_linux.go  2
-rw-r--r--  libpod/networking_linux.go  14
-rw-r--r--  libpod/oci_conmon_linux.go  10
-rw-r--r--  pkg/adapter/containers.go  14
-rw-r--r--  pkg/api/handlers/generic/containers_create.go (renamed from pkg/api/handlers/containers_create.go)  34
-rw-r--r--  pkg/api/handlers/generic/images.go  50
-rw-r--r--  pkg/api/handlers/generic/swagger.go  6
-rw-r--r--  pkg/api/handlers/images.go  2
-rw-r--r--  pkg/api/handlers/libpod/containers_create.go  29
-rw-r--r--  pkg/api/handlers/libpod/pods.go  4
-rw-r--r--  pkg/api/handlers/types.go  30
-rw-r--r--  pkg/api/handlers/utils/containers.go  27
-rw-r--r--  pkg/api/handlers/utils/images.go  6
-rw-r--r--  pkg/api/server/handler_api.go  52
-rw-r--r--  pkg/api/server/register_auth.go  4
-rw-r--r--  pkg/api/server/register_containers.go  120
-rw-r--r--  pkg/api/server/register_distribution.go  2
-rw-r--r--  pkg/api/server/register_events.go  6
-rw-r--r--  pkg/api/server/register_exec.go  20
-rw-r--r--  pkg/api/server/register_healthcheck.go  2
-rw-r--r--  pkg/api/server/register_images.go  106
-rw-r--r--  pkg/api/server/register_info.go  2
-rw-r--r--  pkg/api/server/register_monitor.go  4
-rw-r--r--  pkg/api/server/register_ping.go  8
-rw-r--r--  pkg/api/server/register_plugins.go  4
-rw-r--r--  pkg/api/server/register_pods.go  43
-rw-r--r--  pkg/api/server/register_swarm.go  2
-rw-r--r--  pkg/api/server/register_system.go  4
-rw-r--r--  pkg/api/server/register_version.go  6
-rw-r--r--  pkg/api/server/register_volumes.go  16
-rw-r--r--  pkg/api/server/server.go  120
-rw-r--r--  pkg/bindings/containers/create.go  30
-rw-r--r--  pkg/rootless/rootless_linux.go  7
-rw-r--r--  pkg/seccomp/seccomp.go  2
-rw-r--r--  pkg/spec/config_linux.go  18
-rw-r--r--  pkg/spec/config_unsupported.go  4
-rw-r--r--  pkg/spec/createconfig.go  5
-rw-r--r--  pkg/spec/parse.go  14
-rw-r--r--  pkg/spec/spec.go  50
-rw-r--r--  pkg/spec/storage.go  4
-rw-r--r--  pkg/specgen/config_linux_cgo.go  62
-rw-r--r--  pkg/specgen/config_linux_nocgo.go  11
-rw-r--r--  pkg/specgen/config_unsupported.go  12
-rw-r--r--  pkg/specgen/create.go  187
-rw-r--r--  pkg/specgen/namespaces.go  467
-rw-r--r--  pkg/specgen/oci.go  260
-rw-r--r--  pkg/specgen/specgen.go  99
-rw-r--r--  pkg/specgen/validate.go  159
-rw-r--r--  pkg/systemd/activation.go  40
-rw-r--r--  pkg/systemd/generate/systemdgen.go (renamed from pkg/systemdgen/systemdgen.go)  2
-rw-r--r--  pkg/systemd/generate/systemdgen_test.go (renamed from pkg/systemdgen/systemdgen_test.go)  2
-rw-r--r--  test/e2e/login_logout_test.go  52
-rw-r--r--  test/e2e/ps_test.go  3
-rw-r--r--  test/system/150-login.bats  339
-rw-r--r--  vendor/github.com/containers/storage/CODE-OF-CONDUCT.md  3
-rw-r--r--  vendor/github.com/containers/storage/Makefile  11
-rw-r--r--  vendor/github.com/containers/storage/VERSION  2
-rw-r--r--  vendor/github.com/containers/storage/go.mod  4
-rw-r--r--  vendor/github.com/containers/storage/go.sum  4
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go  2
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go  2156
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go  101
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go  25
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go  15
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go  3
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_linux.go  15
-rw-r--r--  vendor/github.com/containers/storage/store.go  7
-rw-r--r--  vendor/github.com/containers/storage/utils.go  52
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go  24
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go  9
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level1.go  5
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level2.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level3.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level4.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level5.go  3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level6.go  3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/stateless.go  67
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_go15.go  29
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_posix.go  7
-rw-r--r--  vendor/github.com/mattn/go-shellwords/util_windows.go  7
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go  78
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go  156
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go  218
-rw-r--r--  vendor/github.com/stretchr/testify/assert/forward_assertions.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/require/forward_requirements.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go  210
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go  156
-rw-r--r--  vendor/github.com/stretchr/testify/require/requirements.go  2
-rw-r--r--  vendor/modules.txt  8
107 files changed, 5525 insertions, 1167 deletions
diff --git a/Makefile b/Makefile
index 67e798c8d..95b5e04d3 100644
--- a/Makefile
+++ b/Makefile
@@ -151,7 +151,7 @@ endif
lint: golangci-lint
@echo "Linting vs commit '$(call err_if_empty,EPOCH_TEST_COMMIT)'"
ifeq ($(PRE_COMMIT),)
- @echo "FATAL: pre-commit was not found, check https://pre-commit.com/ about installing it." >&2
+ @echo "FATAL: pre-commit was not found, run make .install.pre-commit to install it." >&2
@exit 2
endif
$(PRE_COMMIT) run -a
@@ -562,6 +562,12 @@ endef
.install.golangci-lint: .gopathok
VERSION=1.18.0 GOBIN=$(GOBIN) sh ./hack/install_golangci.sh
+.PHONY: .install.pre-commit
+.install.pre-commit:
+ if [ -z "$(PRE_COMMIT)" ]; then \
+ python3 -m pip install --user pre-commit; \
+ fi
+
.PHONY: .install.md2man
.install.md2man: .gopathok
if [ ! -x "$(GOMD2MAN)" ]; then \
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index 369e0da16..e5ff273b8 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/registries"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -67,10 +68,19 @@ func loginCmd(c *cliconfig.LoginValues) error {
if len(args) > 1 {
return errors.Errorf("too many arguments, login takes only 1 argument")
}
+ var server string
if len(args) == 0 {
- return errors.Errorf("please specify a registry to login to")
+ registriesFromFile, err := registries.GetRegistries()
+ if err != nil || len(registriesFromFile) == 0 {
+ return errors.Errorf("please specify a registry to login to")
+ }
+
+ server = registriesFromFile[0]
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
+
+ } else {
+ server = registryFromFullName(scrubServer(args[0]))
}
- server := registryFromFullName(scrubServer(args[0]))
sc := image.GetSystemContext("", c.Authfile, false)
if c.Flag("tls-verify").Changed {
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 4a113b1d0..dec6822cf 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -8,7 +8,9 @@ import (
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
+ "github.com/containers/libpod/pkg/registries"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -51,10 +53,16 @@ func logoutCmd(c *cliconfig.LogoutValues) error {
if len(args) > 1 {
return errors.Errorf("too many arguments, logout takes at most 1 argument")
}
+ var server string
if len(args) == 0 && !c.All {
- return errors.Errorf("registry must be given")
+ registriesFromFile, err := registries.GetRegistries()
+ if err != nil || len(registriesFromFile) == 0 {
+ return errors.Errorf("no registries found in registries.conf, a registry must be provided")
+ }
+
+ server = registriesFromFile[0]
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
}
- var server string
if len(args) == 1 {
server = scrubServer(args[0])
}
diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go
index a2594b5bf..ebc53ddf8 100644
--- a/cmd/podman/logs.go
+++ b/cmd/podman/logs.go
@@ -15,7 +15,7 @@ var (
logsCommand cliconfig.LogsValues
logsDescription = `Retrieves logs for one or more containers.
- This does not guarantee execution order when combined with podman run (i.e. your run may not have generated any logs at the time you execute podman logs.
+ This does not guarantee execution order when combined with podman run (i.e. your run may not have generated any logs at the time you execute podman logs).
`
_logsCommand = &cobra.Command{
Use: "logs [flags] CONTAINER [CONTAINER...]",
diff --git a/cmd/podman/service.go b/cmd/podman/service.go
index 4978b5d51..3e0ff927f 100644
--- a/cmd/podman/service.go
+++ b/cmd/podman/service.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/pkg/adapter"
api "github.com/containers/libpod/pkg/api/server"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/systemd"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/containers/libpod/version"
@@ -50,21 +51,52 @@ func init() {
serviceCommand.SetHelpTemplate(HelpTemplate())
serviceCommand.SetUsageTemplate(UsageTemplate())
flags := serviceCommand.Flags()
- flags.Int64VarP(&serviceCommand.Timeout, "timeout", "t", 1000, "Time until the service session expires in milliseconds. Use 0 to disable the timeout")
+ flags.Int64VarP(&serviceCommand.Timeout, "timeout", "t", 5, "Time until the service session expires in seconds. Use 0 to disable the timeout")
flags.BoolVar(&serviceCommand.Varlink, "varlink", false, "Use legacy varlink service instead of REST")
}
func serviceCmd(c *cliconfig.ServiceValues) error {
- // For V2, default to the REST socket
- apiURI := adapter.DefaultAPIAddress
+ apiURI, err := resolveApiURI(c)
+ if err != nil {
+ return err
+ }
+
+ // Create a single runtime for API consumption
+ runtime, err := libpodruntime.GetRuntimeDisableFDs(getContext(), &c.PodmanCommand)
+ if err != nil {
+ return errors.Wrapf(err, "error creating libpod runtime")
+ }
+ defer func() {
+ if err := runtime.Shutdown(false); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to shutdown libpod runtime: %v", err)
+ }
+ }()
+
+ timeout := time.Duration(c.Timeout) * time.Second
if c.Varlink {
- apiURI = adapter.DefaultVarlinkAddress
+ return runVarlink(runtime, apiURI, timeout, c)
}
+ return runREST(runtime, apiURI, timeout)
+}
+
+func resolveApiURI(c *cliconfig.ServiceValues) (string, error) {
+ var apiURI string
- if rootless.IsRootless() {
+ // When determining _*THE*_ listening endpoint --
+ // 1) User input wins always
+ // 2) systemd socket activation
+ // 3) rootless honors XDG_RUNTIME_DIR
+ // 4) if varlink -- adapter.DefaultVarlinkAddress
+ // 5) lastly adapter.DefaultAPIAddress
+
+ if len(c.InputArgs) > 0 {
+ apiURI = c.InputArgs[0]
+ } else if ok := systemd.SocketActivated(); ok {
+ apiURI = ""
+ } else if rootless.IsRootless() {
xdg, err := util.GetRuntimeDir()
if err != nil {
- return err
+ return "", err
}
socketName := "podman.sock"
if c.Varlink {
@@ -74,53 +106,59 @@ func serviceCmd(c *cliconfig.ServiceValues) error {
if _, err := os.Stat(filepath.Dir(socketDir)); err != nil {
if os.IsNotExist(err) {
if err := os.Mkdir(filepath.Dir(socketDir), 0755); err != nil {
- return err
+ return "", err
}
} else {
- return err
+ return "", err
}
}
- apiURI = fmt.Sprintf("unix:%s", socketDir)
- }
-
- if len(c.InputArgs) > 0 {
- apiURI = c.InputArgs[0]
+ apiURI = "unix:" + socketDir
+ } else if c.Varlink {
+ apiURI = adapter.DefaultVarlinkAddress
+ } else {
+ // For V2, default to the REST socket
+ apiURI = adapter.DefaultAPIAddress
}
- logrus.Infof("using API endpoint: %s", apiURI)
-
- // Create a single runtime api consumption
- runtime, err := libpodruntime.GetRuntimeDisableFDs(getContext(), &c.PodmanCommand)
- if err != nil {
- return errors.Wrapf(err, "error creating libpod runtime")
+ if "" == apiURI {
+ logrus.Info("using systemd socket activation to determine API endpoint")
+ } else {
+ logrus.Infof("using API endpoint: %s", apiURI)
}
- defer runtime.DeferredShutdown(false)
-
- timeout := time.Duration(c.Timeout) * time.Millisecond
- if c.Varlink {
- return runVarlink(runtime, apiURI, timeout, c)
- }
- return runREST(runtime, apiURI, timeout)
+ return apiURI, nil
}
func runREST(r *libpod.Runtime, uri string, timeout time.Duration) error {
logrus.Warn("This function is EXPERIMENTAL")
fmt.Println("This function is EXPERIMENTAL.")
- fields := strings.Split(uri, ":")
- if len(fields) == 1 {
- return errors.Errorf("%s is an invalid socket destination", uri)
- }
- address := strings.Join(fields[1:], ":")
- l, err := net.Listen(fields[0], address)
- if err != nil {
- return errors.Wrapf(err, "unable to create socket %s", uri)
+
+ var listener *net.Listener
+ if uri != "" {
+ fields := strings.Split(uri, ":")
+ if len(fields) == 1 {
+ return errors.Errorf("%s is an invalid socket destination", uri)
+ }
+ address := strings.Join(fields[1:], ":")
+ l, err := net.Listen(fields[0], address)
+ if err != nil {
+ return errors.Wrapf(err, "unable to create socket %s", uri)
+ }
+ defer l.Close()
+ listener = &l
}
- defer l.Close()
- server, err := api.NewServerWithSettings(r, timeout, &l)
+ server, err := api.NewServerWithSettings(r, timeout, listener)
if err != nil {
return err
}
- return server.Serve()
+ defer func() {
+ if err := server.Shutdown(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error when stopping service: %s", err)
+ }
+ }()
+
+ err = server.Serve()
+ logrus.Debugf("%d/%d Active connections/Total connections\n", server.ActiveConnections, server.TotalConnections)
+ return err
}
func runVarlink(r *libpod.Runtime, uri string, timeout time.Duration, c *cliconfig.ServiceValues) error {
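The resolution order above falls back to systemd socket activation when no URI is supplied; the SocketActivated check comes from the new pkg/systemd/activation.go (listed in the diffstat, not shown in this hunk). A minimal sketch of the standard sd_listen_fds handshake such a check relies on — an illustration of the protocol, not the actual podman implementation:

    // Sketch only: illustrates the sd_listen_fds convention that a
    // SocketActivated-style helper checks; fds passed by systemd start at fd 3.
    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func socketActivated() bool {
        // systemd sets LISTEN_PID to the pid it handed the sockets to.
        pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
        if err != nil || pid != os.Getpid() {
            return false
        }
        // LISTEN_FDS is the number of inherited file descriptors.
        nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
        if err != nil || nfds < 1 {
            return false
        }
        return true
    }

    func main() {
        fmt.Println("socket activated:", socketActivated())
    }
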
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index ff3846e70..b5a1e7104 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -30,6 +30,7 @@ import (
const (
cidTruncLength = 12
podTruncLength = 12
+ iidTruncLength = 12
cmdTruncLength = 17
)
@@ -66,6 +67,7 @@ type BatchContainerStruct struct {
type PsContainerOutput struct {
ID string
Image string
+ ImageID string
Command string
Created string
Ports string
@@ -203,7 +205,7 @@ func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions)
status = "Error"
}
- _, imageName := ctr.Image()
+ imageID, imageName := ctr.Image()
cid := ctr.ID()
podID := ctr.PodID()
if !opts.NoTrunc {
@@ -214,6 +216,9 @@ func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions)
if len(command) > cmdTruncLength {
command = command[0:cmdTruncLength] + "..."
}
+ if len(imageID) > iidTruncLength {
+ imageID = imageID[0:iidTruncLength]
+ }
}
ports, err := ctr.PortMappings()
@@ -223,6 +228,7 @@ func NewBatchContainer(r *libpod.Runtime, ctr *libpod.Container, opts PsOptions)
pso.ID = cid
pso.Image = imageName
+ pso.ImageID = imageID
pso.Command = command
pso.Created = created
pso.Ports = portsToString(ports)
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index 99538b3dc..5b244699c 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -701,9 +701,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
Sysctl: sysctl,
}
- if err := secConfig.SetLabelOpts(runtime, pid, ipc); err != nil {
- return nil, err
- }
if err := secConfig.SetSecurityOpts(runtime, c.StringArray("security-opt")); err != nil {
return nil, err
}
diff --git a/docs/source/markdown/libpod.conf.5.md b/docs/source/markdown/libpod.conf.5.md
index c28c80b56..ca45bccf6 100644
--- a/docs/source/markdown/libpod.conf.5.md
+++ b/docs/source/markdown/libpod.conf.5.md
@@ -83,7 +83,8 @@ libpod to manage containers.
containers and pods are visible.
**label**="true|false"
- Indicates whether the containers should use label separation.
+ Indicates whether the containers should use label separation by default.
+ Can be overridden via `--security-opt label=...` on the CLI.
**num_locks**=""
Number of locks available for containers and pods. Each created container or pod consumes one lock.
diff --git a/docs/source/markdown/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index 8a84d359d..a69b311eb 100644
--- a/docs/source/markdown/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
@@ -4,11 +4,12 @@
podman\-login - Login to a container registry
## SYNOPSIS
-**podman login** [*options*] *registry*
+**podman login** [*options*] [*registry*]
## DESCRIPTION
**podman login** logs into a specified registry server with the correct username
-and password. **podman login** reads in the username and password from STDIN.
+and password. If the registry is not specified, the first registry under [registries.search]
+from registries.conf will be used. **podman login** reads in the username and password from STDIN.
The username and password can also be set using the **username** and **password** flags.
The path of the authentication file can be specified by the user by setting the **authfile**
flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
@@ -17,7 +18,7 @@ flag. The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
**podman login [GLOBAL OPTIONS]**
-**podman login [OPTIONS] REGISTRY [GLOBAL OPTIONS]**
+**podman login [OPTIONS] [REGISTRY] [GLOBAL OPTIONS]**
## OPTIONS
diff --git a/docs/source/markdown/podman-logout.1.md b/docs/source/markdown/podman-logout.1.md
index 01dc52ecd..8b9f75760 100644
--- a/docs/source/markdown/podman-logout.1.md
+++ b/docs/source/markdown/podman-logout.1.md
@@ -8,7 +8,8 @@ podman\-logout - Logout of a container registry
## DESCRIPTION
**podman logout** logs out of a specified registry server by deleting the cached credentials
-stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
+stored in the **auth.json** file. If the registry is not specified, the first registry under [registries.search]
+from registries.conf will be used. The path of the authentication file can be overridden by the user by setting the **authfile** flag.
The default path used is **${XDG\_RUNTIME\_DIR}/containers/auth.json**.
All the cached credentials can be removed by setting the **all** flag.
diff --git a/docs/source/markdown/podman-logs.1.md b/docs/source/markdown/podman-logs.1.md
index 5507ba13a..66308c2b5 100644
--- a/docs/source/markdown/podman-logs.1.md
+++ b/docs/source/markdown/podman-logs.1.md
@@ -11,7 +11,7 @@ podman\-logs - Display the logs of one or more containers
## DESCRIPTION
The podman logs command batch-retrieves whatever logs are present for one or more containers at the time of execution.
This does not guarantee execution order when combined with podman run (i.e. your run may not have generated
-any logs at the time you execute podman logs
+any logs at the time you execute podman logs).
## OPTIONS
diff --git a/docs/source/markdown/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index 23bf9f45d..2f8112aab 100644
--- a/docs/source/markdown/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
@@ -55,7 +55,8 @@ Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | ------------------------------------------------ |
| .ID | Container ID |
-| .Image | Image ID/Name |
+| .Image | Image Name/ID |
+| .ImageID | Image ID |
| .Command | Quoted command used |
| .CreatedAt | Creation time for container |
| .RunningFor | Time elapsed since container was started |
diff --git a/go.mod b/go.mod
index 34967bdd1..4248df975 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
github.com/containers/conmon v2.0.10+incompatible
github.com/containers/image/v5 v5.2.1
github.com/containers/psgo v1.4.0
- github.com/containers/storage v1.15.8
+ github.com/containers/storage v1.16.0
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
@@ -63,7 +63,7 @@ require (
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.4.0
+ github.com/stretchr/testify v1.5.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber/jaeger-client-go v2.22.1+incompatible
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect
diff --git a/go.sum b/go.sum
index 97a5f1e31..58eb00433 100644
--- a/go.sum
+++ b/go.sum
@@ -87,6 +87,8 @@ github.com/containers/storage v1.15.3/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I
github.com/containers/storage v1.15.5/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I4W7U5xQxvF8=
github.com/containers/storage v1.15.8 h1:ef7OfUMTpyq0PIVAhV7qfufEI92gAldk25nItrip+6Q=
github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI=
+github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w=
+github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -260,6 +262,8 @@ github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
+github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -283,6 +287,8 @@ github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
@@ -426,12 +432,15 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
+github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
diff --git a/libpod/config/config.go b/libpod/config/config.go
index 13c128688..c72a0efc7 100644
--- a/libpod/config/config.go
+++ b/libpod/config/config.go
@@ -2,7 +2,7 @@ package config
import (
"bytes"
- "io/ioutil"
+ "fmt"
"os"
"os/exec"
"path/filepath"
@@ -287,18 +287,16 @@ type DBConfig struct {
}
// readConfigFromFile reads the specified config file at `path` and attempts to
-// unmarshal its content into a Config.
-func readConfigFromFile(path string) (*Config, error) {
- var config Config
-
- configBytes, err := ioutil.ReadFile(path)
+// unmarshal its content into a Config. The config param specifies the previous
+// default config. If the TOML file at path only specifies a few fields, the
+// defaults from the config parameter will be used for all other fields.
+func readConfigFromFile(path string, config *Config) (*Config, error) {
+ logrus.Debugf("Reading configuration file %q", path)
+ _, err := toml.DecodeFile(path, config)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("unable to decode configuration %v: %v", path, err)
}
- logrus.Debugf("Reading configuration file %q", path)
- err = toml.Unmarshal(configBytes, &config)
-
// For the sake of backwards compat we need to check if the config fields
// with *Set suffix are set in the config. Note that the storage-related
// fields are NOT set in the config here but in the storage.conf OR directly
@@ -313,7 +311,7 @@ func readConfigFromFile(path string) (*Config, error) {
config.TmpDirSet = true
}
- return &config, err
+ return config, err
}
// Write decodes the config as TOML and writes it to the specified path.
@@ -439,15 +437,11 @@ func probeConmon(conmonBinary string) error {
// with cgroupsv2. Other OCI runtimes are not yet supporting cgroupsv2. This
// might change in the future.
func NewConfig(userConfigPath string) (*Config, error) {
- config := &Config{} // start with an empty config
-
- // First, try to read the user-specified config
- if userConfigPath != "" {
- var err error
- config, err = readConfigFromFile(userConfigPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
- }
+ // Start with the default config and iteratively merge in fields from the system
+ // configs.
+ config, err := defaultConfigFromMemory()
+ if err != nil {
+ return nil, err
}
// Now, check if the user can access system configs and merge them if needed.
@@ -456,44 +450,45 @@ func NewConfig(userConfigPath string) (*Config, error) {
return nil, errors.Wrapf(err, "error finding config on system")
}
- migrated := false
for _, path := range configs {
- systemConfig, err := readConfigFromFile(path)
+ config, err = readConfigFromFile(path, config)
if err != nil {
return nil, errors.Wrapf(err, "error reading system config %q", path)
}
- // Handle CGroups v2 configuration migration.
- // Migrate only the first config, and do it before
- // merging.
- if !migrated {
- if err := cgroupV2Check(path, systemConfig); err != nil {
- return nil, errors.Wrapf(err, "error rewriting configuration file %s", userConfigPath)
- }
- migrated = true
- }
- // Merge the it into the config. Any unset field in config will be
- // over-written by the systemConfig.
- if err := config.mergeConfig(systemConfig); err != nil {
- return nil, errors.Wrapf(err, "error merging system config")
- }
- logrus.Debugf("Merged system config %q: %v", path, config)
}
- // Finally, create a default config from memory and forcefully merge it into
- // the config. This way we try to make sure that all fields are properly set
- // and that user AND system config can partially set.
- defaultConfig, err := defaultConfigFromMemory()
- if err != nil {
- return nil, errors.Wrapf(err, "error generating default config from memory")
+ // Finally, try to read the user-specified config; it overrides the system configs
+ if userConfigPath != "" {
+ var err error
+ config, err = readConfigFromFile(userConfigPath, config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
+ }
}
- // Check if we need to switch to cgroupfs and logger=file on rootless.
- defaultConfig.checkCgroupsAndLogger()
-
- if err := config.mergeConfig(defaultConfig); err != nil {
- return nil, errors.Wrapf(err, "error merging default config from memory")
+ // Since runc does not currently support cgroup v2,
+ // default to crun the first time libpod.conf is processed.
+ // TODO: Once runc supports cgroup v2, this check should be removed.
+ if !config.CgroupCheck && rootless.IsRootless() {
+ cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, err
+ }
+ if cgroupsV2 {
+ path, err := exec.LookPath("crun")
+ if err != nil {
+ // Can't find crun path so do nothing
+ logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err)
+ } else {
+ config.CgroupCheck = true
+ config.OCIRuntime = path
+ }
+ }
}
+ // If we need to, switch to cgroupfs and logger=file on rootless.
+ config.checkCgroupsAndLogger()
+
// Relative paths can cause nasty bugs, because core paths we use could
// shift between runs (or even parts of the program - the OCI runtime
// uses a different working directory than we do, for example.
@@ -532,12 +527,12 @@ func systemConfigs() ([]string, error) {
}
configs := []string{}
- if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
- configs = append(configs, _rootOverrideConfigPath)
- }
if _, err := os.Stat(_rootConfigPath); err == nil {
configs = append(configs, _rootConfigPath)
}
+ if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
+ configs = append(configs, _rootOverrideConfigPath)
+ }
return configs, nil
}
@@ -568,29 +563,56 @@ func (c *Config) checkCgroupsAndLogger() {
}
}
-// Since runc does not currently support cgroupV2
-// Change to default crun on first running of libpod.conf
-// TODO Once runc has support for cgroups, this function should be removed.
-func cgroupV2Check(configPath string, tmpConfig *Config) error {
- if !tmpConfig.CgroupCheck && rootless.IsRootless() {
- logrus.Debugf("Rewriting %s for CGroup v2 upgrade", configPath)
- cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return err
+// MergeDBConfig merges the configuration from the database.
+func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
+
+ if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
+ if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
+ c.StorageConfig.RunRoot != "" {
+ logrus.Debugf("Overriding run root %q with %q from database",
+ c.StorageConfig.RunRoot, dbConfig.StorageTmp)
}
- if cgroupsV2 {
- path, err := exec.LookPath("crun")
- if err != nil {
- logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err)
- // Can't find crun path so do nothing
- return nil
- }
- tmpConfig.CgroupCheck = true
- tmpConfig.OCIRuntime = path
- if err := tmpConfig.Write(configPath); err != nil {
- return err
- }
+ c.StorageConfig.RunRoot = dbConfig.StorageTmp
+ }
+
+ if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
+ if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
+ c.StorageConfig.GraphRoot != "" {
+ logrus.Debugf("Overriding graph root %q with %q from database",
+ c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
+ }
+ c.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ }
+
+ if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
+ if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
+ c.StorageConfig.GraphDriverName != "" {
+ logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
+ c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
+ }
+ c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
+ }
+
+ if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
+ if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
+ logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
+ }
+ c.StaticDir = dbConfig.LibpodRoot
+ }
+
+ if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
+ if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
+ logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
+ }
+ c.TmpDir = dbConfig.LibpodTmp
+ c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
+ }
+
+ if !c.VolumePathSet && dbConfig.VolumePath != "" {
+ if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
+ logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
}
+ c.VolumePath = dbConfig.VolumePath
}
return nil
}
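With readConfigFromFile now decoding each file into the already-populated Config, the hand-written merge helpers (deleted below in libpod/config/merge.go) are no longer needed: toml.DecodeFile only overwrites the fields that actually appear in the file. A small self-contained sketch of that overlay behaviour, using a hypothetical struct and file name for illustration:

    // Sketch: decoding a partial TOML file into a pre-filled struct only changes
    // the keys present in the file; all other fields keep their previous values.
    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type cfg struct {
        CgroupManager string `toml:"cgroup_manager"`
        NumLocks      uint32 `toml:"num_locks"`
    }

    func main() {
        c := cfg{CgroupManager: "systemd", NumLocks: 2048} // in-memory defaults
        // override.conf (hypothetical) contains only: cgroup_manager = "cgroupfs"
        if _, err := toml.DecodeFile("override.conf", &c); err != nil {
            fmt.Println("decode:", err)
            return
        }
        fmt.Println(c.CgroupManager, c.NumLocks) // cgroupfs 2048 — NumLocks untouched
    }
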
diff --git a/libpod/config/config_test.go b/libpod/config/config_test.go
index 47c092440..24620ce0e 100644
--- a/libpod/config/config_test.go
+++ b/libpod/config/config_test.go
@@ -11,14 +11,14 @@ import (
func TestEmptyConfig(t *testing.T) {
// Make sure that we can read empty configs
- config, err := readConfigFromFile("testdata/empty.conf")
+ config, err := readConfigFromFile("testdata/empty.conf", &Config{})
assert.NotNil(t, config)
assert.Nil(t, err)
}
func TestDefaultLibpodConf(t *testing.T) {
// Make sure that we can read the default libpod.conf
- config, err := readConfigFromFile("testdata/libpod.conf")
+ config, err := readConfigFromFile("testdata/libpod.conf", &Config{})
assert.NotNil(t, config)
assert.Nil(t, err)
}
@@ -32,13 +32,10 @@ func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
defaultConfig.StateType = define.InvalidStateStore
defaultConfig.StorageConfig = storage.StoreOptions{}
- emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf", defaultConfig)
assert.NotNil(t, emptyConfig)
assert.Nil(t, err)
- err = emptyConfig.mergeConfig(defaultConfig)
- assert.Nil(t, err)
-
equal := reflect.DeepEqual(emptyConfig, defaultConfig)
assert.True(t, equal)
}
@@ -46,19 +43,16 @@ func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
func TestMergeEmptyAndLibpodConfig(t *testing.T) {
// Make sure that when we merge the default config into an empty one that we
// effectively get the default config.
- libpodConfig, err := readConfigFromFile("testdata/libpod.conf")
+ libpodConfig, err := readConfigFromFile("testdata/libpod.conf", &Config{})
assert.NotNil(t, libpodConfig)
assert.Nil(t, err)
libpodConfig.StateType = define.InvalidStateStore
libpodConfig.StorageConfig = storage.StoreOptions{}
- emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf", libpodConfig)
assert.NotNil(t, emptyConfig)
assert.Nil(t, err)
- err = emptyConfig.mergeConfig(libpodConfig)
- assert.Nil(t, err)
-
equal := reflect.DeepEqual(emptyConfig, libpodConfig)
assert.True(t, equal)
}
diff --git a/libpod/config/merge.go b/libpod/config/merge.go
deleted file mode 100644
index 798a63da7..000000000
--- a/libpod/config/merge.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package config
-
-import (
- "path/filepath"
-
- "github.com/containers/libpod/libpod/define"
- "github.com/sirupsen/logrus"
-)
-
-// Merge merges the other config into the current one. Note that a field of the
-// other config is only merged when it's not already set in the current one.
-//
-// Note that the StateType and the StorageConfig will NOT be changed.
-func (c *Config) mergeConfig(other *Config) error {
- // strings
- c.CgroupManager = mergeStrings(c.CgroupManager, other.CgroupManager)
- c.CNIConfigDir = mergeStrings(c.CNIConfigDir, other.CNIConfigDir)
- c.CNIDefaultNetwork = mergeStrings(c.CNIDefaultNetwork, other.CNIDefaultNetwork)
- c.DefaultMountsFile = mergeStrings(c.DefaultMountsFile, other.DefaultMountsFile)
- c.DetachKeys = mergeStrings(c.DetachKeys, other.DetachKeys)
- c.EventsLogFilePath = mergeStrings(c.EventsLogFilePath, other.EventsLogFilePath)
- c.EventsLogger = mergeStrings(c.EventsLogger, other.EventsLogger)
- c.ImageDefaultTransport = mergeStrings(c.ImageDefaultTransport, other.ImageDefaultTransport)
- c.InfraCommand = mergeStrings(c.InfraCommand, other.InfraCommand)
- c.InfraImage = mergeStrings(c.InfraImage, other.InfraImage)
- c.InitPath = mergeStrings(c.InitPath, other.InitPath)
- c.LockType = mergeStrings(c.LockType, other.LockType)
- c.Namespace = mergeStrings(c.Namespace, other.Namespace)
- c.NetworkCmdPath = mergeStrings(c.NetworkCmdPath, other.NetworkCmdPath)
- c.OCIRuntime = mergeStrings(c.OCIRuntime, other.OCIRuntime)
- c.SignaturePolicyPath = mergeStrings(c.SignaturePolicyPath, other.SignaturePolicyPath)
- c.StaticDir = mergeStrings(c.StaticDir, other.StaticDir)
- c.TmpDir = mergeStrings(c.TmpDir, other.TmpDir)
- c.VolumePath = mergeStrings(c.VolumePath, other.VolumePath)
-
- // string map of slices
- c.OCIRuntimes = mergeStringMaps(c.OCIRuntimes, other.OCIRuntimes)
-
- // string slices
- c.CNIPluginDir = mergeStringSlices(c.CNIPluginDir, other.CNIPluginDir)
- c.ConmonEnvVars = mergeStringSlices(c.ConmonEnvVars, other.ConmonEnvVars)
- c.ConmonPath = mergeStringSlices(c.ConmonPath, other.ConmonPath)
- c.HooksDir = mergeStringSlices(c.HooksDir, other.HooksDir)
- c.RuntimePath = mergeStringSlices(c.RuntimePath, other.RuntimePath)
- c.RuntimeSupportsJSON = mergeStringSlices(c.RuntimeSupportsJSON, other.RuntimeSupportsJSON)
- c.RuntimeSupportsNoCgroups = mergeStringSlices(c.RuntimeSupportsNoCgroups, other.RuntimeSupportsNoCgroups)
-
- // int64s
- c.MaxLogSize = mergeInt64s(c.MaxLogSize, other.MaxLogSize)
-
- // uint32s
- c.NumLocks = mergeUint32s(c.NumLocks, other.NumLocks)
-
- // bools
- c.EnableLabeling = mergeBools(c.EnableLabeling, other.EnableLabeling)
- c.EnablePortReservation = mergeBools(c.EnablePortReservation, other.EnablePortReservation)
- c.NoPivotRoot = mergeBools(c.NoPivotRoot, other.NoPivotRoot)
- c.SDNotify = mergeBools(c.SDNotify, other.SDNotify)
-
- // state type
- if c.StateType == define.InvalidStateStore {
- c.StateType = other.StateType
- }
-
- // store options - need to check all fields since some configs might only
- // set it partially
- c.StorageConfig.RunRoot = mergeStrings(c.StorageConfig.RunRoot, other.StorageConfig.RunRoot)
- c.StorageConfig.GraphRoot = mergeStrings(c.StorageConfig.GraphRoot, other.StorageConfig.GraphRoot)
- c.StorageConfig.GraphDriverName = mergeStrings(c.StorageConfig.GraphDriverName, other.StorageConfig.GraphDriverName)
- c.StorageConfig.GraphDriverOptions = mergeStringSlices(c.StorageConfig.GraphDriverOptions, other.StorageConfig.GraphDriverOptions)
- if c.StorageConfig.UIDMap == nil {
- c.StorageConfig.UIDMap = other.StorageConfig.UIDMap
- }
- if c.StorageConfig.GIDMap == nil {
- c.StorageConfig.GIDMap = other.StorageConfig.GIDMap
- }
-
- // backwards compat *Set fields
- c.StorageConfigRunRootSet = mergeBools(c.StorageConfigRunRootSet, other.StorageConfigRunRootSet)
- c.StorageConfigGraphRootSet = mergeBools(c.StorageConfigGraphRootSet, other.StorageConfigGraphRootSet)
- c.StorageConfigGraphDriverNameSet = mergeBools(c.StorageConfigGraphDriverNameSet, other.StorageConfigGraphDriverNameSet)
- c.VolumePathSet = mergeBools(c.VolumePathSet, other.VolumePathSet)
- c.StaticDirSet = mergeBools(c.StaticDirSet, other.StaticDirSet)
- c.TmpDirSet = mergeBools(c.TmpDirSet, other.TmpDirSet)
-
- return nil
-}
-
-// MergeDBConfig merges the configuration from the database.
-func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
-
- if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
- if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
- c.StorageConfig.RunRoot != "" {
- logrus.Debugf("Overriding run root %q with %q from database",
- c.StorageConfig.RunRoot, dbConfig.StorageTmp)
- }
- c.StorageConfig.RunRoot = dbConfig.StorageTmp
- }
-
- if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
- if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
- c.StorageConfig.GraphRoot != "" {
- logrus.Debugf("Overriding graph root %q with %q from database",
- c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
- }
- c.StorageConfig.GraphRoot = dbConfig.StorageRoot
- }
-
- if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
- if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
- c.StorageConfig.GraphDriverName != "" {
- logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
- c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
- }
- c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
- }
-
- if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
- if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
- logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
- }
- c.StaticDir = dbConfig.LibpodRoot
- }
-
- if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
- if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
- logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
- }
- c.TmpDir = dbConfig.LibpodTmp
- c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
- }
-
- if !c.VolumePathSet && dbConfig.VolumePath != "" {
- if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
- logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
- }
- c.VolumePath = dbConfig.VolumePath
- }
- return nil
-}
-
-func mergeStrings(a, b string) string {
- if a == "" {
- return b
- }
- return a
-}
-
-func mergeStringSlices(a, b []string) []string {
- if len(a) == 0 && b != nil {
- return b
- }
- return a
-}
-
-func mergeStringMaps(a, b map[string][]string) map[string][]string {
- if len(a) == 0 && b != nil {
- return b
- }
- return a
-}
-
-func mergeInt64s(a, b int64) int64 {
- if a == 0 {
- return b
- }
- return a
-}
-
-func mergeUint32s(a, b uint32) uint32 {
- if a == 0 {
- return b
- }
- return a
-}
-
-func mergeBools(a, b bool) bool {
- if !a {
- return b
- }
- return a
-}
diff --git a/libpod/config/merge_test.go b/libpod/config/merge_test.go
deleted file mode 100644
index eb450b273..000000000
--- a/libpod/config/merge_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package config
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestMergeStrings(t *testing.T) {
- testData := []struct {
- a string
- b string
- res string
- }{
- {"", "", ""},
- {"a", "", "a"},
- {"a", "b", "a"},
- {"", "b", "b"},
- }
- for _, data := range testData {
- res := mergeStrings(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeStringSlices(t *testing.T) {
- testData := []struct {
- a []string
- b []string
- res []string
- }{
- {
- nil, nil, nil,
- },
- {
- nil,
- []string{},
- []string{},
- },
- {
- []string{},
- nil,
- []string{},
- },
- {
- []string{},
- []string{},
- []string{},
- },
- {
- []string{"a"},
- []string{},
- []string{"a"},
- },
- {
- []string{"a"},
- []string{"b"},
- []string{"a"},
- },
- {
- []string{},
- []string{"b"},
- []string{"b"},
- },
- }
- for _, data := range testData {
- res := mergeStringSlices(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeStringMaps(t *testing.T) {
- testData := []struct {
- a map[string][]string
- b map[string][]string
- res map[string][]string
- }{
- {
- nil, nil, nil,
- },
- {
- nil,
- map[string][]string{},
- map[string][]string{}},
- {
- map[string][]string{"a": {"a"}},
- nil,
- map[string][]string{"a": {"a"}},
- },
- {
- nil,
- map[string][]string{"b": {"b"}},
- map[string][]string{"b": {"b"}},
- },
- {
- map[string][]string{"a": {"a"}},
- map[string][]string{"b": {"b"}},
- map[string][]string{"a": {"a"}},
- },
- }
- for _, data := range testData {
- res := mergeStringMaps(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeInts64(t *testing.T) {
- testData := []struct {
- a int64
- b int64
- res int64
- }{
- {int64(0), int64(0), int64(0)},
- {int64(1), int64(0), int64(1)},
- {int64(0), int64(1), int64(1)},
- {int64(2), int64(1), int64(2)},
- {int64(-1), int64(1), int64(-1)},
- {int64(0), int64(-1), int64(-1)},
- }
- for _, data := range testData {
- res := mergeInt64s(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-func TestMergeUint32(t *testing.T) {
- testData := []struct {
- a uint32
- b uint32
- res uint32
- }{
- {uint32(0), uint32(0), uint32(0)},
- {uint32(1), uint32(0), uint32(1)},
- {uint32(0), uint32(1), uint32(1)},
- {uint32(2), uint32(1), uint32(2)},
- }
- for _, data := range testData {
- res := mergeUint32s(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
-
-func TestMergeBools(t *testing.T) {
- testData := []struct {
- a bool
- b bool
- res bool
- }{
- {false, false, false},
- {true, false, true},
- {false, true, true},
- {true, true, true},
- }
- for _, data := range testData {
- res := mergeBools(data.a, data.b)
- assert.Equal(t, data.res, res)
- }
-}
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index c4acc3d4f..748715ed3 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -40,7 +40,7 @@ func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *l
defaultTime := time.Time{}
if options.Since != defaultTime {
// coreos/go-systemd/sdjournal doesn't correctly handle requests for data in the future
- // return nothing instead of fasely printing
+ // return nothing instead of falsely printing
if time.Now().Before(options.Since) {
return nil
}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index d90bcb708..fa8593f20 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -335,10 +335,13 @@ func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (er
return errors.Wrapf(err, "delete file %s", logPath)
}
- ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
- if err != nil {
- return errors.Wrapf(err, "failed to create rootless port sync pipe")
+ if !ctr.config.PostConfigureNetNS {
+ ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create rootless port sync pipe")
+ }
}
+
cfg := rootlessport.Config{
Mappings: ctr.config.PortMappings,
NetNSPath: netnsPath,
@@ -355,6 +358,11 @@ func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (er
cmd := exec.Command(fmt.Sprintf("/proc/%d/exe", os.Getpid()))
cmd.Args = []string{rootlessport.ReexecKey}
// Leak one end of the pipe in rootlessport process, the other will be sent to conmon
+
+ if ctr.rootlessPortSyncR != nil {
+ defer errorhandling.CloseQuiet(ctr.rootlessPortSyncR)
+ }
+
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncR, syncW)
cmd.Stdin = cfgR
// stdout is for human-readable error, stderr is for debug log
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 722012386..07d38693f 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1161,6 +1161,13 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if ctr.config.NetMode.IsSlirp4netns() {
if ctr.config.PostConfigureNetNS {
+ havePortMapping := len(ctr.Config().PortMappings) > 0
+ if havePortMapping {
+ ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create rootless port sync pipe")
+ }
+ }
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
if err != nil {
return errors.Wrapf(err, "failed to create rootless network sync pipe")
@@ -1176,9 +1183,6 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
// Leak one end in conmon, the other one will be leaked into slirp4netns
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
- if ctr.rootlessPortSyncR != nil {
- defer errorhandling.CloseQuiet(ctr.rootlessPortSyncR)
- }
if ctr.rootlessPortSyncW != nil {
defer errorhandling.CloseQuiet(ctr.rootlessPortSyncW)
// Leak one end in conmon, the other one will be leaked into rootlessport
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index cada93829..170b2e24e 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -26,7 +26,7 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
- "github.com/containers/libpod/pkg/systemdgen"
+ "github.com/containers/libpod/pkg/systemd/generate"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -1154,7 +1154,7 @@ func generateServiceName(c *cliconfig.GenerateSystemdValues, ctr *libpod.Contain
// generateSystemdgenContainerInfo is a helper to generate a
// systemdgen.ContainerInfo for `GenerateSystemd`.
-func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSystemdValues, nameOrID string, pod *libpod.Pod) (*systemdgen.ContainerInfo, bool, error) {
+func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSystemdValues, nameOrID string, pod *libpod.Pod) (*generate.ContainerInfo, bool, error) {
ctr, err := r.Runtime.LookupContainer(nameOrID)
if err != nil {
return nil, false, err
@@ -1172,7 +1172,7 @@ func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSyst
}
name, serviceName := generateServiceName(c, ctr, pod)
- info := &systemdgen.ContainerInfo{
+ info := &generate.ContainerInfo{
ServiceName: serviceName,
ContainerName: name,
RestartPolicy: c.RestartPolicy,
@@ -1187,7 +1187,7 @@ func (r *LocalRuntime) generateSystemdgenContainerInfo(c *cliconfig.GenerateSyst
// GenerateSystemd creates a unit file for a container or pod.
func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (string, error) {
- opts := systemdgen.Options{
+ opts := generate.Options{
Files: c.Files,
New: c.New,
}
@@ -1196,7 +1196,7 @@ func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (stri
if info, found, err := r.generateSystemdgenContainerInfo(c, c.InputArgs[0], nil); found && err != nil {
return "", err
} else if found && err == nil {
- return systemdgen.CreateContainerSystemdUnit(info, opts)
+ return generate.CreateContainerSystemdUnit(info, opts)
}
// --new does not support pods.
@@ -1242,7 +1242,7 @@ func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (stri
// Traverse the dependency graph and create systemdgen.ContainerInfo's for
// each container.
- containerInfos := []*systemdgen.ContainerInfo{podInfo}
+ containerInfos := []*generate.ContainerInfo{podInfo}
for ctr, dependencies := range graph.DependencyMap() {
// Skip the infra container as we already generated it.
if ctr.ID() == infraID {
@@ -1272,7 +1272,7 @@ func (r *LocalRuntime) GenerateSystemd(c *cliconfig.GenerateSystemdValues) (stri
if i > 0 {
builder.WriteByte('\n')
}
- out, err := systemdgen.CreateContainerSystemdUnit(info, opts)
+ out, err := generate.CreateContainerSystemdUnit(info, opts)
if err != nil {
return "", err
}
diff --git a/pkg/api/handlers/containers_create.go b/pkg/api/handlers/generic/containers_create.go
index 48f0de94d..7e542752f 100644
--- a/pkg/api/handlers/containers_create.go
+++ b/pkg/api/handlers/generic/containers_create.go
@@ -1,4 +1,4 @@
-package handlers
+package generic
import (
"encoding/json"
@@ -6,10 +6,10 @@ import (
"net/http"
"strings"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
image2 "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/signal"
@@ -17,14 +17,13 @@ import (
"github.com/containers/storage"
"github.com/gorilla/schema"
"github.com/pkg/errors"
- log "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func CreateContainer(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
- input := CreateContainerConfig{}
+ input := handlers.CreateContainerConfig{}
query := struct {
Name string `schema:"name"`
}{
@@ -52,34 +51,11 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "makeCreatConfig()"))
return
}
-
cc.Name = query.Name
- var pod *libpod.Pod
- ctr, err := shared.CreateContainerFromCreateConfig(runtime, &cc, r.Context(), pod)
- if err != nil {
- if strings.Contains(err.Error(), "invalid log driver") {
- // this does not quite work yet and needs a little more massaging
- w.Header().Set("Content-Type", "text/plain; charset=us-ascii")
- w.WriteHeader(http.StatusInternalServerError)
- msg := fmt.Sprintf("logger: no log driver named '%s' is registered", input.HostConfig.LogConfig.Type)
- if _, err := fmt.Fprintln(w, msg); err != nil {
- log.Errorf("%s: %q", msg, err)
- }
- //s.WriteResponse(w, http.StatusInternalServerError, fmt.Sprintf("logger: no log driver named '%s' is registered", input.HostConfig.LogConfig.Type))
- return
- }
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()"))
- return
- }
-
- response := ContainerCreateResponse{
- ID: ctr.ID(),
- Warnings: []string{}}
-
- utils.WriteResponse(w, http.StatusCreated, response)
+ utils.CreateContainer(r.Context(), w, runtime, &cc)
}
-func makeCreateConfig(input CreateContainerConfig, newImage *image2.Image) (createconfig.CreateConfig, error) {
+func makeCreateConfig(input handlers.CreateContainerConfig, newImage *image2.Image) (createconfig.CreateConfig, error) {
var (
err error
init bool
diff --git a/pkg/api/handlers/generic/images.go b/pkg/api/handlers/generic/images.go
index c65db7575..1ced499d9 100644
--- a/pkg/api/handlers/generic/images.go
+++ b/pkg/api/handlers/generic/images.go
@@ -106,14 +106,14 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
query := struct {
- author string
- changes string
- comment string
- container string
+ Author string `schema:"author"`
+ Changes string `schema:"changes"`
+ Comment string `schema:"comment"`
+ Container string `schema:"container"`
//fromSrc string # fromSrc is currently unused
- pause bool
- repo string
- tag string
+ Pause bool `schema:"pause"`
+ Repo string `schema:"repo"`
+ Tag string `schema:"tag"`
}{
// This is where you can override the golang default value for one of fields
}
@@ -145,22 +145,22 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
return
}
- if len(query.tag) > 0 {
- tag = query.tag
+ if len(query.Tag) > 0 {
+ tag = query.Tag
}
- options.Message = query.comment
- options.Author = query.author
- options.Pause = query.pause
- options.Changes = strings.Fields(query.changes)
- ctr, err := runtime.LookupContainer(query.container)
+ options.Message = query.Comment
+ options.Author = query.Author
+ options.Pause = query.Pause
+ options.Changes = strings.Fields(query.Changes)
+ ctr, err := runtime.LookupContainer(query.Container)
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusNotFound, err)
return
}
// I know mitr hates this ... but doing for now
- if len(query.repo) > 1 {
- destImage = fmt.Sprintf("%s:%s", query.repo, tag)
+ if len(query.Repo) > 1 {
+ destImage = fmt.Sprintf("%s:%s", query.Repo, tag)
}
commitImage, err := ctr.Commit(r.Context(), destImage, options)
@@ -179,8 +179,8 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
query := struct {
- fromSrc string
- changes []string
+ FromSrc string `schema:"fromSrc"`
+ Changes []string `schema:"changes"`
}{
// This is where you can override the golang default value for one of the fields
}
@@ -190,7 +190,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
return
}
// fromSrc – Source to import. The value may be a URL from which the image can be retrieved or - to read the image from the request body. This parameter may only be used when importing an image.
- source := query.fromSrc
+ source := query.FromSrc
if source == "-" {
f, err := ioutil.TempFile("", "api_load.tar")
if err != nil {
@@ -202,7 +202,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file"))
}
}
- iid, err := runtime.Import(r.Context(), source, "", query.changes, "", false)
+ iid, err := runtime.Import(r.Context(), source, "", query.Changes, "", false)
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball"))
return
@@ -238,8 +238,8 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
query := struct {
- fromImage string
- tag string
+ FromImage string `schema:"fromImage"`
+ Tag string `schema:"tag"`
}{
// This is where you can override the golang default value for one of the fields
}
@@ -254,9 +254,9 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
repo – Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image.
tag – Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled.
*/
- fromImage := query.fromImage
- if len(query.tag) < 1 {
- fromImage = fmt.Sprintf("%s:%s", fromImage, query.tag)
+ fromImage := query.FromImage
+ if len(query.Tag) >= 1 {
+ fromImage = fmt.Sprintf("%s:%s", fromImage, query.Tag)
}
// TODO
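
The query structs above move from unexported, untagged fields to exported fields with `schema` tags because the gorilla/schema decoder, like encoding/json, can only populate exported struct fields, and the tag supplies the query-parameter name to read. A minimal standalone sketch of that decoding pattern, using made-up parameters rather than the exact ones from these handlers:

package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

// commitQuery mirrors the shape of the anonymous query structs in the
// handlers: exported fields plus `schema` tags naming the URL parameters.
type commitQuery struct {
	Author string `schema:"author"`
	Pause  bool   `schema:"pause"`
	Tag    string `schema:"tag"`
}

func main() {
	// Roughly what r.URL.Query() yields for ?author=jdoe&pause=true&tag=v1
	values := url.Values{
		"author": []string{"jdoe"},
		"pause":  []string{"true"},
		"tag":    []string{"v1"},
	}

	q := commitQuery{}
	decoder := schema.NewDecoder()
	decoder.IgnoreUnknownKeys(true) // tolerate parameters the struct does not declare
	if err := decoder.Decode(&q, values); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%+v\n", q) // {Author:jdoe Pause:true Tag:v1}
}
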
diff --git a/pkg/api/handlers/generic/swagger.go b/pkg/api/handlers/generic/swagger.go
index bfe527c41..c9c9610bb 100644
--- a/pkg/api/handlers/generic/swagger.go
+++ b/pkg/api/handlers/generic/swagger.go
@@ -1,13 +1,15 @@
package generic
-import "github.com/containers/libpod/pkg/api/handlers"
+import (
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+)
// Create container
// swagger:response ContainerCreateResponse
type swagCtrCreateResponse struct {
// in:body
Body struct {
- handlers.ContainerCreateResponse
+ utils.ContainerCreateResponse
}
}
diff --git a/pkg/api/handlers/images.go b/pkg/api/handlers/images.go
index cd3c0b93f..e4e394d68 100644
--- a/pkg/api/handlers/images.go
+++ b/pkg/api/handlers/images.go
@@ -155,7 +155,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) {
}
results, err := image.SearchImages(query.Term, options)
if err != nil {
- utils.InternalServerError(w, err)
+ utils.BadRequest(w, "term", query.Term, err)
}
utils.WriteResponse(w, http.StatusOK, results)
}
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
new file mode 100644
index 000000000..ebca41151
--- /dev/null
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -0,0 +1,29 @@
+package libpod
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/pkg/errors"
+)
+
+// CreateContainer takes a specgenerator and makes a container. It returns
+// the new container ID on success along with any warnings.
+func CreateContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ var sg specgen.SpecGenerator
+ if err := json.NewDecoder(r.Body).Decode(&sg); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ return
+ }
+ ctr, err := sg.MakeContainer(runtime)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ response := utils.ContainerCreateResponse{ID: ctr.ID()}
+ utils.WriteJSON(w, http.StatusCreated, response)
+}
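
The new libpod create handler decodes the request body directly into a pkg/specgen.SpecGenerator and replies with the new container's ID. A hedged client-side sketch of exercising the endpoint over TCP; the listen address, the version prefix accepted by VersionedPath, and the "image" field in the body are assumptions for illustration only (SpecGenerator's real fields are defined outside this diff, and the service normally listens on a unix socket):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// createResponse matches utils.ContainerCreateResponse from this commit.
type createResponse struct {
	ID       string   `json:"id"`
	Warnings []string `json:"Warnings"`
}

func main() {
	// Hypothetical spec body; consult pkg/specgen.SpecGenerator for the real field names.
	body, _ := json.Marshal(map[string]interface{}{
		"image": "docker.io/library/alpine:latest",
	})

	resp, err := http.Post(
		"http://127.0.0.1:8080/v1.24/libpod/containers/create", // address and version prefix are assumed
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var cr createResponse
	if err := json.NewDecoder(resp.Body).Decode(&cr); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("created container:", cr.ID, "warnings:", cr.Warnings)
}
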
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index d043b1204..008b9b14b 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -99,12 +99,10 @@ func PodCreate(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http_code, err)
return
}
- utils.WriteResponse(w, http.StatusCreated, handlers.IDResponse{ID: pod.CgroupParent()})
+ utils.WriteResponse(w, http.StatusCreated, handlers.IDResponse{ID: pod.ID()})
}
func Pods(w http.ResponseWriter, r *http.Request) {
- // 200 ok
- // 500 internal
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
podInspectData []*libpod.PodInspect
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index f5d9f9ad5..6268028f5 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -151,6 +151,7 @@ type ContainerTopOKBody struct {
dockerContainer.ContainerTopOKBody
}
+// swagger:model PodCreateConfig
type PodCreateConfig struct {
Name string `json:"name"`
CGroupParent string `json:"cgroup-parent"`
@@ -351,18 +352,21 @@ func ImageDataToImageInspect(ctx context.Context, l *libpodImage.Image) (*ImageI
func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Container, error) {
imageId, imageName := l.Image()
- sizeRW, err := l.RWSize()
- if err != nil {
+
+ var (
+ err error
+ sizeRootFs int64
+ sizeRW int64
+ state define.ContainerStatus
+ )
+
+ if state, err = l.State(); err != nil {
return nil, err
}
-
- SizeRootFs, err := l.RootFsSize()
- if err != nil {
+ if sizeRW, err = l.RWSize(); err != nil {
return nil, err
}
-
- state, err := l.State()
- if err != nil {
+ if sizeRootFs, err = l.RootFsSize(); err != nil {
return nil, err
}
@@ -375,7 +379,7 @@ func LibpodToContainer(l *libpod.Container, infoData []define.InfoData) (*Contai
Created: l.CreatedTime().Unix(),
Ports: nil,
SizeRw: sizeRW,
- SizeRootFs: SizeRootFs,
+ SizeRootFs: sizeRootFs,
Labels: l.Labels(),
State: string(state),
Status: "",
@@ -545,11 +549,3 @@ func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) {
}
return ports, nil
}
-
-// ContainerCreateResponse is the response struct for creating a container
-type ContainerCreateResponse struct {
- // ID of the container created
- ID string `json:"id"`
- // Warnings during container creation
- Warnings []string `json:"Warnings"`
-}
diff --git a/pkg/api/handlers/utils/containers.go b/pkg/api/handlers/utils/containers.go
index 74485edf2..402005581 100644
--- a/pkg/api/handlers/utils/containers.go
+++ b/pkg/api/handlers/utils/containers.go
@@ -1,6 +1,7 @@
package utils
import (
+ "context"
"fmt"
"net/http"
"syscall"
@@ -9,13 +10,22 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
+ createconfig "github.com/containers/libpod/pkg/spec"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
+// ContainerCreateResponse is the response struct for creating a container
+type ContainerCreateResponse struct {
+ // ID of the container created
+ ID string `json:"id"`
+ // Warnings during container creation
+ Warnings []string `json:"Warnings"`
+}
+
func KillContainer(w http.ResponseWriter, r *http.Request) (*libpod.Container, error) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- decoder := r.Context().Value("decorder").(*schema.Decoder)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
Signal syscall.Signal `schema:"signal"`
}{
@@ -119,3 +129,18 @@ func GenerateFilterFuncsFromMap(r *libpod.Runtime, filters map[string][]string)
}
return filterFuncs, nil
}
+
+func CreateContainer(ctx context.Context, w http.ResponseWriter, runtime *libpod.Runtime, cc *createconfig.CreateConfig) {
+ var pod *libpod.Pod
+ ctr, err := shared.CreateContainerFromCreateConfig(runtime, cc, ctx, pod)
+ if err != nil {
+ Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "CreateContainerFromCreateConfig()"))
+ return
+ }
+
+ response := ContainerCreateResponse{
+ ID: ctr.ID(),
+ Warnings: []string{}}
+
+ WriteResponse(w, http.StatusCreated, response)
+}
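
ContainerCreateResponse now lives in the shared utils package so the compat and libpod create handlers emit the same wire format. A tiny sketch of the JSON a client sees, using a made-up ID; note the lower-case "id" key next to the capitalised "Warnings" key, exactly as the struct tags declare:

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as utils.ContainerCreateResponse above.
type containerCreateResponse struct {
	ID       string   `json:"id"`
	Warnings []string `json:"Warnings"`
}

func main() {
	resp := containerCreateResponse{ID: "8d2c4bafe1aa", Warnings: []string{}} // made-up ID
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"id":"8d2c4bafe1aa","Warnings":[]}
}
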
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index d0dfbf3ab..f68a71561 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -31,12 +31,16 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
if _, found := mux.Vars(r)["digests"]; found && query.Digests {
UnSupportedParameter("digests")
}
+
if len(query.Filters) > 0 {
for k, v := range query.Filters {
for _, val := range v {
filters = append(filters, fmt.Sprintf("%s=%s", k, val))
}
}
+ return runtime.ImageRuntime().GetImagesWithFilters(filters)
+ } else {
+ return runtime.ImageRuntime().GetImages()
}
- return runtime.ImageRuntime().GetImagesWithFilters(filters)
+
}
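
GetImages now returns the unfiltered image list when no filters were supplied, and otherwise flattens the decoded filters map into the "key=value" strings the filtered lookup expects. A standalone sketch of that flattening step; the helper name is illustrative:

package main

import "fmt"

// flattenFilters mirrors the loop in GetImages: every value of every
// filter key becomes one "key=value" entry.
func flattenFilters(in map[string][]string) []string {
	var out []string
	for k, vals := range in {
		for _, v := range vals {
			out = append(out, fmt.Sprintf("%s=%s", k, v))
		}
	}
	return out
}

func main() {
	filters := map[string][]string{
		"dangling": {"true"},
		"label":    {"maintainer=podman"},
	}
	fmt.Println(flattenFilters(filters))
	// Possible output (map iteration order varies):
	// [dangling=true label=maintainer=podman]
}
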
diff --git a/pkg/api/server/handler_api.go b/pkg/api/server/handler_api.go
index 4b93998ee..30a1680c9 100644
--- a/pkg/api/server/handler_api.go
+++ b/pkg/api/server/handler_api.go
@@ -2,32 +2,52 @@ package server
import (
"context"
+ "fmt"
"net/http"
+ "runtime"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
log "github.com/sirupsen/logrus"
)
// APIHandler is a wrapper to enhance HandlerFunc's and remove redundant code
-func APIHandler(ctx context.Context, h http.HandlerFunc) http.HandlerFunc {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- log.Debugf("APIHandler -- Method: %s URL: %s", r.Method, r.URL.String())
- if err := r.ParseForm(); err != nil {
- log.Infof("Failed Request: unable to parse form: %q", err)
- }
+func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // http.Server hides panics, we want to see them and fix the cause.
+ defer func() {
+ err := recover()
+ if err != nil {
+ buf := make([]byte, 1<<20)
+ n := runtime.Stack(buf, true)
+ log.Warnf("Recovering from podman handler panic: %v, %s", err, buf[:n])
+ // Try to inform client things went south... won't work if handler already started writing response body
+ utils.InternalServerError(w, fmt.Errorf("%v", err))
+ }
+ }()
+
+ // Wrapper to hide some boilerplate
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ // Connection counting, ugh. Needed to support the sliding window for idle checking.
+ s.ConnectionCh <- EnterHandler
+ defer func() { s.ConnectionCh <- ExitHandler }()
+
+ log.Debugf("APIHandler -- Method: %s URL: %s (conn %d/%d)",
+ r.Method, r.URL.String(), s.ActiveConnections, s.TotalConnections)
- // TODO: Use ConnContext when ported to go 1.13
- c := context.WithValue(r.Context(), "decoder", ctx.Value("decoder"))
- c = context.WithValue(c, "runtime", ctx.Value("runtime"))
- c = context.WithValue(c, "shutdownFunc", ctx.Value("shutdownFunc"))
- r = r.WithContext(c)
+ if err := r.ParseForm(); err != nil {
+ log.Infof("Failed Request: unable to parse form: %q", err)
+ }
- h(w, r)
+ // TODO: Use r.ConnContext when ported to go 1.13
+ c := context.WithValue(r.Context(), "decoder", s.Decoder)
+ c = context.WithValue(c, "runtime", s.Runtime)
+ c = context.WithValue(c, "shutdownFunc", s.Shutdown)
+ r = r.WithContext(c)
- shutdownFunc := r.Context().Value("shutdownFunc").(func() error)
- if err := shutdownFunc(); err != nil {
- log.Errorf("Failed to shutdown Server in APIHandler(): %s", err.Error())
+ h(w, r)
}
- })
+ fn(w, r)
+ }
}
// VersionedPath prepends the version parsing code
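
APIHandler is now a method on APIServer: it recovers handler panics and logs a stack trace, counts in-flight handlers over ConnectionCh for the idle-timeout bookkeeping, and injects the decoder, runtime, and shutdown function into the request context instead of reading them from a package-level context. A self-contained sketch of just the panic-recovery part of that pattern, built with the standard library only (the wrapper name is illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"
	"runtime"
)

// recoverPanics captures a panic raised by the wrapped handler, logs the
// goroutine stacks, and answers 500 if the response body was not started yet.
func recoverPanics(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				buf := make([]byte, 1<<20)
				n := runtime.Stack(buf, true)
				log.Printf("recovered from handler panic: %v\n%s", err, buf[:n])
				http.Error(w, fmt.Sprintf("%v", err), http.StatusInternalServerError)
			}
		}()
		h(w, r)
	}
}

func main() {
	http.HandleFunc("/boom", recoverPanics(func(w http.ResponseWriter, r *http.Request) {
		panic("something went wrong")
	}))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
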
diff --git a/pkg/api/server/register_auth.go b/pkg/api/server/register_auth.go
index 9f312683d..8db131153 100644
--- a/pkg/api/server/register_auth.go
+++ b/pkg/api/server/register_auth.go
@@ -5,7 +5,7 @@ import (
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterAuthHandlers(r *mux.Router) error {
- r.Handle(VersionedPath("/auth"), APIHandler(s.Context, handlers.UnsupportedHandler))
+func (s *APIServer) registerAuthHandlers(r *mux.Router) error {
+ r.Handle(VersionedPath("/auth"), s.APIHandler(handlers.UnsupportedHandler))
return nil
}
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index b1facc27c..6007a2d00 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -9,8 +9,8 @@ import (
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
- // swagger:operation POST /containers/create compat containerCreate
+func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
+ // swagger:operation POST /containers/create compat createContainer
// ---
// summary: Create a container
// tags:
@@ -33,7 +33,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/create"), APIHandler(s.Context, handlers.CreateContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/create"), s.APIHandler(generic.CreateContainer)).Methods(http.MethodPost)
// swagger:operation GET /containers/json compat listContainers
// ---
// tags:
@@ -83,7 +83,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/json"), APIHandler(s.Context, generic.ListContainers)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/containers/json"), s.APIHandler(generic.ListContainers)).Methods(http.MethodGet)
// swagger:operation POST /containers/prune compat pruneContainers
// ---
// tags:
@@ -105,7 +105,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/DocsContainerPruneReport"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/prune"), APIHandler(s.Context, handlers.PruneContainers)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/prune"), s.APIHandler(handlers.PruneContainers)).Methods(http.MethodPost)
// swagger:operation DELETE /containers/{name} compat removeContainer
// ---
// tags:
@@ -144,7 +144,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}"), APIHandler(s.Context, generic.RemoveContainer)).Methods(http.MethodDelete)
+ r.HandleFunc(VersionedPath("/containers/{name}"), s.APIHandler(generic.RemoveContainer)).Methods(http.MethodDelete)
// swagger:operation GET /containers/{name}/json compat getContainer
// ---
// tags:
@@ -171,8 +171,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/json"), APIHandler(s.Context, generic.GetContainer)).Methods(http.MethodGet)
- // swagger:operation post /containers/{name}/kill compat killcontainer
+ r.HandleFunc(VersionedPath("/containers/{name}/json"), s.APIHandler(generic.GetContainer)).Methods(http.MethodGet)
+ // swagger:operation POST /containers/{name}/kill compat killContainer
// ---
// tags:
// - containers (compat)
@@ -201,8 +201,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/kill"), APIHandler(s.Context, generic.KillContainer)).Methods(http.MethodPost)
- // swagger:operation GET /containers/{name}/logs compat LogsFromContainer
+ r.HandleFunc(VersionedPath("/containers/{name}/kill"), s.APIHandler(generic.KillContainer)).Methods(http.MethodPost)
+ // swagger:operation GET /containers/{name}/logs compat logsFromContainer
// ---
// tags:
// - containers (compat)
@@ -253,7 +253,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/logs"), APIHandler(s.Context, generic.LogsFromContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/containers/{name}/logs"), s.APIHandler(generic.LogsFromContainer)).Methods(http.MethodGet)
// swagger:operation POST /containers/{name}/pause compat pauseContainer
// ---
// tags:
@@ -275,8 +275,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/pause"), APIHandler(s.Context, handlers.PauseContainer)).Methods(http.MethodPost)
- r.HandleFunc(VersionedPath("/containers/{name}/rename"), APIHandler(s.Context, handlers.UnsupportedHandler)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/pause"), s.APIHandler(handlers.PauseContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/rename"), s.APIHandler(handlers.UnsupportedHandler)).Methods(http.MethodPost)
// swagger:operation POST /containers/{name}/restart compat restartContainer
// ---
// tags:
@@ -301,7 +301,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/restart"), APIHandler(s.Context, handlers.RestartContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/restart"), s.APIHandler(handlers.RestartContainer)).Methods(http.MethodPost)
// swagger:operation POST /containers/{name}/start compat startContainer
// ---
// tags:
@@ -329,7 +329,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/start"), APIHandler(s.Context, handlers.StartContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/start"), s.APIHandler(handlers.StartContainer)).Methods(http.MethodPost)
// swagger:operation GET /containers/{name}/stats compat statsContainer
// ---
// tags:
@@ -356,7 +356,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/stats"), APIHandler(s.Context, generic.StatsContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/containers/{name}/stats"), s.APIHandler(generic.StatsContainer)).Methods(http.MethodGet)
// swagger:operation POST /containers/{name}/stop compat stopContainer
// ---
// tags:
@@ -384,7 +384,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/stop"), APIHandler(s.Context, handlers.StopContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/stop"), s.APIHandler(handlers.StopContainer)).Methods(http.MethodPost)
// swagger:operation GET /containers/{name}/top compat topContainer
// ---
// tags:
@@ -409,7 +409,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/top"), APIHandler(s.Context, handlers.TopContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/containers/{name}/top"), s.APIHandler(handlers.TopContainer)).Methods(http.MethodGet)
// swagger:operation POST /containers/{name}/unpause compat unpauseContainer
// ---
// tags:
@@ -431,7 +431,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/unpause"), APIHandler(s.Context, handlers.UnpauseContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/unpause"), s.APIHandler(handlers.UnpauseContainer)).Methods(http.MethodPost)
// swagger:operation POST /containers/{name}/wait compat waitContainer
// ---
// tags:
@@ -457,8 +457,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/wait"), APIHandler(s.Context, generic.WaitContainer)).Methods(http.MethodPost)
- // swagger:operation POST /containers/{name}/attach compat attach
+ r.HandleFunc(VersionedPath("/containers/{name}/wait"), s.APIHandler(generic.WaitContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /containers/{name}/attach compat attachContainer
// ---
// tags:
// - containers (compat)
@@ -512,8 +512,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/attach"), APIHandler(s.Context, handlers.AttachContainer)).Methods(http.MethodPost)
- // swagger:operation POST /containers/{name}/resize compat resize
+ r.HandleFunc(VersionedPath("/containers/{name}/attach"), s.APIHandler(handlers.AttachContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /containers/{name}/resize compat resizeContainer
// ---
// tags:
// - containers (compat)
@@ -544,13 +544,37 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/containers/{name}/resize"), APIHandler(s.Context, handlers.ResizeContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/containers/{name}/resize"), s.APIHandler(handlers.ResizeContainer)).Methods(http.MethodPost)
/*
libpod endpoints
*/
- r.HandleFunc(VersionedPath("/libpod/containers/create"), APIHandler(s.Context, handlers.CreateContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/create libpod libpodContainerCreate
+ // ---
+ // summary: Create a container
+ // tags:
+ // - containers
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: body
+ // name: create
+ // description: attributes for creating a container
+ // schema:
+ // $ref: "#/definitions/SpecGenerator"
+ // responses:
+ // 201:
+ // $ref: "#/responses/ContainerCreateResponse"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 409:
+ // $ref: "#/responses/ConflictError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/create"), s.APIHandler(libpod.CreateContainer)).Methods(http.MethodPost)
// swagger:operation GET /libpod/containers/json libpod libpodListContainers
// ---
// tags:
@@ -615,7 +639,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/json"), APIHandler(s.Context, libpod.ListContainers)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/json"), s.APIHandler(libpod.ListContainers)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/prune libpod libpodPruneContainers
// ---
// tags:
@@ -637,8 +661,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/DocsLibpodPruneResponse"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/prune"), APIHandler(s.Context, handlers.PruneContainers)).Methods(http.MethodPost)
- // swagger:operation GET /libpod/containers/showmounted libpod showMounterContainers
+ r.HandleFunc(VersionedPath("/libpod/containers/prune"), s.APIHandler(handlers.PruneContainers)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/containers/showmounted libpod showMountedContainers
// ---
// tags:
// - containers
@@ -655,7 +679,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// type: string
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/showmounted"), APIHandler(s.Context, libpod.ShowMountedContainers)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/showmounted"), s.APIHandler(libpod.ShowMountedContainers)).Methods(http.MethodGet)
// swagger:operation DELETE /libpod/containers/{name} libpod libpodRemoveContainer
// ---
// tags:
@@ -689,7 +713,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}"), APIHandler(s.Context, libpod.RemoveContainer)).Methods(http.MethodDelete)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}"), s.APIHandler(libpod.RemoveContainer)).Methods(http.MethodDelete)
// swagger:operation GET /libpod/containers/{name}/json libpod libpodGetContainer
// ---
// tags:
@@ -715,7 +739,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/json"), APIHandler(s.Context, libpod.GetContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/json"), s.APIHandler(libpod.GetContainer)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/{name}/kill libpod libpodKillContainer
// ---
// tags:
@@ -744,8 +768,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/kill"), APIHandler(s.Context, libpod.KillContainer)).Methods(http.MethodGet)
- // swagger:operation GET /libpod/containers/{name}/mount libpod mountContainer
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/kill"), s.APIHandler(libpod.KillContainer)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/{name}/mount libpod mountContainer
// ---
// tags:
// - containers
@@ -770,7 +794,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/mount"), APIHandler(s.Context, libpod.MountContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/mount"), s.APIHandler(libpod.MountContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/unmount libpod libpodUnmountContainer
// ---
// tags:
@@ -792,7 +816,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/unmount"), APIHandler(s.Context, libpod.UnmountContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/unmount"), s.APIHandler(libpod.UnmountContainer)).Methods(http.MethodPost)
// swagger:operation GET /libpod/containers/{name}/logs libpod libpodLogsFromContainer
// ---
// tags:
@@ -844,7 +868,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/logs"), APIHandler(s.Context, generic.LogsFromContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/logs"), s.APIHandler(generic.LogsFromContainer)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/{name}/pause libpod libpodPauseContainer
// ---
// tags:
@@ -866,7 +890,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// "$ref": "#/responses/NoSuchContainer"
// 500:
// "$ref": "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/pause"), APIHandler(s.Context, handlers.PauseContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name:..*}/pause"), s.APIHandler(handlers.PauseContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/restart libpod libpodRestartContainer
// ---
// tags:
@@ -891,7 +915,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/restart"), APIHandler(s.Context, handlers.RestartContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/restart"), s.APIHandler(handlers.RestartContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/start libpod libpodStartContainer
// ---
// tags:
@@ -919,7 +943,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/start"), APIHandler(s.Context, handlers.StartContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/start"), s.APIHandler(handlers.StartContainer)).Methods(http.MethodPost)
// swagger:operation GET /libpod/containers/{name}/stats libpod libpodStatsContainer
// ---
// tags:
@@ -946,7 +970,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/stats"), APIHandler(s.Context, generic.StatsContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/stats"), s.APIHandler(generic.StatsContainer)).Methods(http.MethodGet)
// swagger:operation GET /libpod/containers/{name}/top libpod libpodTopContainer
// ---
// tags:
@@ -980,7 +1004,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/top"), APIHandler(s.Context, handlers.TopContainer)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/top"), s.APIHandler(handlers.TopContainer)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/{name}/unpause libpod libpodUnpauseContainer
// ---
// tags:
@@ -1001,7 +1025,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/unpause"), APIHandler(s.Context, handlers.UnpauseContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/unpause"), s.APIHandler(handlers.UnpauseContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/wait libpod libpodWaitContainer
// ---
// tags:
@@ -1022,8 +1046,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/wait"), APIHandler(s.Context, libpod.WaitContainer)).Methods(http.MethodPost)
- // swagger:operation POST /libpod/containers/{name}/exists libpod containerExists
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/wait"), s.APIHandler(libpod.WaitContainer)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/containers/{name}/exists libpod containerExists
// ---
// tags:
// - containers
@@ -1044,7 +1068,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/exists"), APIHandler(s.Context, libpod.ContainerExists)).Methods(http.MethodGet)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/exists"), s.APIHandler(libpod.ContainerExists)).Methods(http.MethodGet)
// swagger:operation POST /libpod/containers/{name}/stop libpod libpodStopContainer
// ---
// tags:
@@ -1071,7 +1095,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/stop"), APIHandler(s.Context, handlers.StopContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/stop"), s.APIHandler(handlers.StopContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/attach libpod libpodAttach
// ---
// tags:
@@ -1126,7 +1150,7 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/attach"), APIHandler(s.Context, handlers.AttachContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/attach"), s.APIHandler(handlers.AttachContainer)).Methods(http.MethodPost)
// swagger:operation POST /libpod/containers/{name}/resize libpod libpodResize
// ---
// tags:
@@ -1158,6 +1182,6 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchContainer"
// 500:
// $ref: "#/responses/InternalError"
- r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), APIHandler(s.Context, handlers.ResizeContainer)).Methods(http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(handlers.ResizeContainer)).Methods(http.MethodPost)
return nil
}
diff --git a/pkg/api/server/register_distribution.go b/pkg/api/server/register_distribution.go
index b0ac61fb8..f03662224 100644
--- a/pkg/api/server/register_distribution.go
+++ b/pkg/api/server/register_distribution.go
@@ -5,7 +5,7 @@ import (
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterDistributionHandlers(r *mux.Router) error {
+func (s *APIServer) registerDistributionHandlers(r *mux.Router) error {
r.HandleFunc(VersionedPath("/distribution/{name}/json"), handlers.UnsupportedHandler)
return nil
}
diff --git a/pkg/api/server/register_events.go b/pkg/api/server/register_events.go
index 090f66323..bc3b62662 100644
--- a/pkg/api/server/register_events.go
+++ b/pkg/api/server/register_events.go
@@ -1,11 +1,13 @@
package server
import (
+ "net/http"
+
"github.com/containers/libpod/pkg/api/handlers"
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterEventsHandlers(r *mux.Router) error {
+func (s *APIServer) registerEventsHandlers(r *mux.Router) error {
// swagger:operation GET /events system getEvents
// ---
// tags:
@@ -32,6 +34,6 @@ func (s *APIServer) RegisterEventsHandlers(r *mux.Router) error {
// description: returns a string of json data describing an event
// 500:
// "$ref": "#/responses/InternalError"
- r.Handle(VersionedPath("/events"), APIHandler(s.Context, handlers.GetEvents))
+ r.Handle(VersionedPath("/events"), s.APIHandler(handlers.GetEvents)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_exec.go b/pkg/api/server/register_exec.go
index dbf04dc19..ad62de3f5 100644
--- a/pkg/api/server/register_exec.go
+++ b/pkg/api/server/register_exec.go
@@ -74,7 +74,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// description: container is paused
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/containers/{name}/create"), APIHandler(s.Context, handlers.CreateExec)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/containers/{name}/create"), s.APIHandler(handlers.CreateExec)).Methods(http.MethodPost)
// swagger:operation POST /exec/{id}/start compat startExec
// ---
// tags:
@@ -110,7 +110,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// description: container is stopped or paused
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/exec/{id}/start"), APIHandler(s.Context, handlers.StartExec)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(handlers.StartExec)).Methods(http.MethodPost)
// swagger:operation POST /exec/{id}/resize compat resizeExec
// ---
// tags:
@@ -141,8 +141,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchExecInstance"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/exec/{id}/resize"), APIHandler(s.Context, handlers.ResizeExec)).Methods(http.MethodPost)
- // swagger:operation GET /exec/{id}/inspect compat inspectExec
+ r.Handle(VersionedPath("/exec/{id}/resize"), s.APIHandler(handlers.ResizeExec)).Methods(http.MethodPost)
+ // swagger:operation GET /exec/{id}/json compat inspectExec
// ---
// tags:
// - exec (compat)
@@ -163,7 +163,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchExecInstance"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/exec/{id}/json"), APIHandler(s.Context, handlers.InspectExec)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/exec/{id}/json"), s.APIHandler(handlers.InspectExec)).Methods(http.MethodGet)
/*
libpod api follows
@@ -235,7 +235,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// description: container is paused
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/containers/{name}/create"), APIHandler(s.Context, handlers.CreateExec)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/containers/{name}/create"), s.APIHandler(handlers.CreateExec)).Methods(http.MethodPost)
// swagger:operation POST /libpod/exec/{id}/start libpod libpodStartExec
// ---
// tags:
@@ -271,7 +271,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// description: container is stopped or paused
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/exec/{id}/start"), APIHandler(s.Context, handlers.StartExec)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/exec/{id}/start"), s.APIHandler(handlers.StartExec)).Methods(http.MethodPost)
// swagger:operation POST /libpod/exec/{id}/resize libpod libpodResizeExec
// ---
// tags:
@@ -302,8 +302,8 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchExecInstance"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/exec/{id}/resize"), APIHandler(s.Context, handlers.ResizeExec)).Methods(http.MethodPost)
- // swagger:operation GET /libpod/exec/{id}/inspect libpod libpodInspectExec
+ r.Handle(VersionedPath("/libpod/exec/{id}/resize"), s.APIHandler(handlers.ResizeExec)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/exec/{id}/json libpod libpodInspectExec
// ---
// tags:
// - exec
@@ -324,6 +324,6 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchExecInstance"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/exec/{id}/json"), APIHandler(s.Context, handlers.InspectExec)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/exec/{id}/json"), s.APIHandler(handlers.InspectExec)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_healthcheck.go b/pkg/api/server/register_healthcheck.go
index 1286324f0..5466e2905 100644
--- a/pkg/api/server/register_healthcheck.go
+++ b/pkg/api/server/register_healthcheck.go
@@ -8,6 +8,6 @@ import (
)
func (s *APIServer) registerHealthCheckHandlers(r *mux.Router) error {
- r.Handle(VersionedPath("/libpod/containers/{name}/runhealthcheck"), APIHandler(s.Context, libpod.RunHealthCheck)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/containers/{name}/runhealthcheck"), s.APIHandler(libpod.RunHealthCheck)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 4706a7c69..4c8f05385 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -47,8 +47,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchImage"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/images/create"), APIHandler(s.Context, generic.CreateImageFromImage)).Methods(http.MethodPost).Queries("fromImage", "{fromImage}")
- r.Handle(VersionedPath("/images/create"), APIHandler(s.Context, generic.CreateImageFromSrc)).Methods(http.MethodPost).Queries("fromSrc", "{fromSrc}")
+ r.Handle(VersionedPath("/images/create"), s.APIHandler(generic.CreateImageFromImage)).Methods(http.MethodPost).Queries("fromImage", "{fromImage}")
+ r.Handle(VersionedPath("/images/create"), s.APIHandler(generic.CreateImageFromSrc)).Methods(http.MethodPost).Queries("fromSrc", "{fromSrc}")
// swagger:operation GET /images/json compat listImages
// ---
// tags:
@@ -83,7 +83,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/DockerImageSummary"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/json"), APIHandler(s.Context, generic.GetImages)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/images/json"), s.APIHandler(generic.GetImages)).Methods(http.MethodGet)
// swagger:operation POST /images/load compat importImage
// ---
// tags:
@@ -107,7 +107,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: no error
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/load"), APIHandler(s.Context, generic.LoadImages)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/images/load"), s.APIHandler(generic.LoadImages)).Methods(http.MethodPost)
// swagger:operation POST /images/prune compat pruneImages
// ---
// tags:
@@ -132,7 +132,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/DocsImageDeleteResponse"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/prune"), APIHandler(s.Context, generic.PruneImages)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/images/prune"), s.APIHandler(generic.PruneImages)).Methods(http.MethodPost)
// swagger:operation GET /images/search compat searchImages
// ---
// tags:
@@ -161,10 +161,12 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// responses:
// 200:
// $ref: "#/responses/DocsSearchResponse"
+ // 400:
+ // $ref: "#/responses/BadParamError"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/search"), APIHandler(s.Context, handlers.SearchImages)).Methods(http.MethodGet)
- // swagger:operation DELETE /images/{name} compat removeImage
+ r.Handle(VersionedPath("/images/search"), s.APIHandler(handlers.SearchImages)).Methods(http.MethodGet)
+ // swagger:operation DELETE /images/{name:.*} compat removeImage
// ---
// tags:
// - images (compat)
@@ -172,7 +174,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Delete an image from local storage
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: name or ID of image to delete
@@ -195,8 +197,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/ConflictError'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/{name}"), APIHandler(s.Context, handlers.RemoveImage)).Methods(http.MethodDelete)
- // swagger:operation GET /images/{name}/get compat exportImage
+ r.Handle(VersionedPath("/images/{name:.*}"), s.APIHandler(handlers.RemoveImage)).Methods(http.MethodDelete)
+ // swagger:operation GET /images/{name:.*}/get compat exportImage
// ---
// tags:
// - images (compat)
@@ -204,7 +206,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Export an image in tarball format
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -218,8 +220,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// format: binary
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/{name}/get"), APIHandler(s.Context, generic.ExportImage)).Methods(http.MethodGet)
- // swagger:operation GET /images/{name}/history compat imageHistory
+ r.Handle(VersionedPath("/images/{name:.*}/get"), s.APIHandler(generic.ExportImage)).Methods(http.MethodGet)
+ // swagger:operation GET /images/{name:.*}/history compat imageHistory
// ---
// tags:
// - images (compat)
@@ -227,7 +229,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Return parent layers of an image.
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -240,8 +242,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchImage"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/images/{name}/history"), APIHandler(s.Context, handlers.HistoryImage)).Methods(http.MethodGet)
- // swagger:operation GET /images/{name}/json compat inspectImage
+ r.Handle(VersionedPath("/images/{name:.*}/history"), s.APIHandler(handlers.HistoryImage)).Methods(http.MethodGet)
+ // swagger:operation GET /images/{name:.*}/json compat inspectImage
// ---
// tags:
// - images (compat)
@@ -249,7 +251,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Return low-level information about an image.
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -262,8 +264,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchImage"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/images/{name}/json"), APIHandler(s.Context, generic.GetImage))
- // swagger:operation POST /images/{name}/tag compat tagImage
+ r.Handle(VersionedPath("/images/{name:.*}/json"), s.APIHandler(generic.GetImage)).Methods(http.MethodGet)
+ // swagger:operation POST /images/{name:.*}/tag compat tagImage
// ---
// tags:
// - images (compat)
@@ -271,7 +273,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Tag an image so that it becomes part of a repository.
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -296,8 +298,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/ConflictError'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/images/{name}/tag"), APIHandler(s.Context, handlers.TagImage)).Methods(http.MethodPost)
- // swagger:operation POST /commit/ compat commitContainer
+ r.Handle(VersionedPath("/images/{name:.*}/tag"), s.APIHandler(handlers.TagImage)).Methods(http.MethodPost)
+ // swagger:operation POST /commit compat commitContainer
// ---
// tags:
// - containers (compat)
@@ -341,9 +343,9 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/commit"), APIHandler(s.Context, generic.CommitContainer)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/commit"), s.APIHandler(generic.CommitContainer)).Methods(http.MethodPost)
- // swagger:operation POST /build images buildImage
+ // swagger:operation POST /build compat buildImage
// ---
// tags:
// - images
@@ -551,12 +553,12 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/build"), APIHandler(s.Context, handlers.BuildImage)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/build"), s.APIHandler(handlers.BuildImage)).Methods(http.MethodPost)
/*
libpod endpoints
*/
- // swagger:operation POST /libpod/images/{name}/exists libpod libpodImageExists
+ // swagger:operation GET /libpod/images/{name:.*}/exists libpod libpodImageExists
// ---
// tags:
// - images
@@ -564,7 +566,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Check if image exists in local store
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -577,8 +579,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/exists"), APIHandler(s.Context, libpod.ImageExists))
- // swagger:operation POST /libpod/images/{name}/tree libpod libpodImageTree
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/exists"), s.APIHandler(libpod.ImageExists)).Methods(http.MethodGet)
+ // swagger:operation GET /libpod/images/{name:.*}/tree libpod libpodImageTree
// ---
// tags:
// - images
@@ -586,7 +588,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Retrieve the image tree for the provided image name or ID
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -603,8 +605,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/tree"), APIHandler(s.Context, libpod.ImageTree))
- // swagger:operation GET /libpod/images/{name}/history libpod libpodImageHistory
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/tree"), s.APIHandler(libpod.ImageTree)).Methods(http.MethodGet)
+ // swagger:operation GET /libpod/images/{name:.*}/history libpod libpodImageHistory
// ---
// tags:
// - images
@@ -612,7 +614,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Return parent layers of an image.
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -625,7 +627,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/history"), APIHandler(s.Context, handlers.HistoryImage)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/history"), s.APIHandler(handlers.HistoryImage)).Methods(http.MethodGet)
// swagger:operation GET /libpod/images/json libpod libpodListImages
// ---
// tags:
@@ -655,7 +657,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/DockerImageSummary"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/json"), APIHandler(s.Context, libpod.GetImages)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/images/json"), s.APIHandler(libpod.GetImages)).Methods(http.MethodGet)
// swagger:operation POST /libpod/images/load libpod libpodImagesLoad
// ---
// tags:
@@ -681,7 +683,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/load"), APIHandler(s.Context, libpod.ImagesLoad)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/images/load"), s.APIHandler(libpod.ImagesLoad)).Methods(http.MethodPost)
// swagger:operation POST /libpod/images/import libpod libpodImagesImport
// ---
// tags:
@@ -721,7 +723,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/import"), APIHandler(s.Context, libpod.ImagesImport)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/images/import"), s.APIHandler(libpod.ImagesImport)).Methods(http.MethodPost)
// swagger:operation POST /libpod/images/pull libpod libpodImagesPull
// ---
// tags:
@@ -763,7 +765,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/pull"), APIHandler(s.Context, libpod.ImagesPull)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/images/pull"), s.APIHandler(libpod.ImagesPull)).Methods(http.MethodPost)
// swagger:operation POST /libpod/images/prune libpod libpodPruneImages
// ---
// tags:
@@ -788,7 +790,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/DocsImageDeleteResponse"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/prune"), APIHandler(s.Context, libpod.PruneImages)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/images/prune"), s.APIHandler(libpod.PruneImages)).Methods(http.MethodPost)
// swagger:operation GET /libpod/images/search libpod libpodSearchImages
// ---
// tags:
@@ -819,8 +821,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: "#/responses/DocsSearchResponse"
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/search"), APIHandler(s.Context, handlers.SearchImages)).Methods(http.MethodGet)
- // swagger:operation DELETE /libpod/images/{name} libpod libpodRemoveImage
+ r.Handle(VersionedPath("/libpod/images/search"), s.APIHandler(handlers.SearchImages)).Methods(http.MethodGet)
+ // swagger:operation DELETE /libpod/images/{name:.*} libpod libpodRemoveImage
// ---
// tags:
// - images
@@ -828,7 +830,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Delete an image from local store
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: name or ID of image to delete
@@ -849,8 +851,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/ConflictError'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}"), APIHandler(s.Context, handlers.RemoveImage)).Methods(http.MethodDelete)
- // swagger:operation GET /libpod/images/{name}/get libpod libpoodExportImage
+ r.Handle(VersionedPath("/libpod/images/{name:.*}"), s.APIHandler(handlers.RemoveImage)).Methods(http.MethodDelete)
+ // swagger:operation GET /libpod/images/{name:.*}/get libpod libpodExportImage
// ---
// tags:
// - images
@@ -858,7 +860,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Export an image as a tarball
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -882,8 +884,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/get"), APIHandler(s.Context, libpod.ExportImage)).Methods(http.MethodGet)
- // swagger:operation GET /libpod/images/{name}/json libpod libpodInspectImage
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/get"), s.APIHandler(libpod.ExportImage)).Methods(http.MethodGet)
+ // swagger:operation GET /libpod/images/{name:.*}/json libpod libpodInspectImage
// ---
// tags:
// - images
@@ -891,7 +893,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Obtain low-level information about an image
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -904,8 +906,8 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/NoSuchImage'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/json"), APIHandler(s.Context, libpod.GetImage))
- // swagger:operation POST /libpod/images/{name}/tag libpod libpodTagImage
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/json"), s.APIHandler(libpod.GetImage)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/images/{name:.*}/tag libpod libpodTagImage
// ---
// tags:
// - images
@@ -913,7 +915,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// description: Tag an image so that it becomes part of a repository.
// parameters:
// - in: path
- // name: name
+ // name: name:.*
// type: string
// required: true
// description: the name or ID of the container
@@ -938,7 +940,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// $ref: '#/responses/ConflictError'
// 500:
// $ref: '#/responses/InternalError'
- r.Handle(VersionedPath("/libpod/images/{name}/tag"), APIHandler(s.Context, handlers.TagImage)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/tag"), s.APIHandler(handlers.TagImage)).Methods(http.MethodPost)
return nil
}
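
A note on the route change above: switching the image routes from {name} to {name:.*} lets gorilla/mux capture path variables that themselves contain slashes, which full image references such as docker.io/library/alpine need. A minimal, standalone sketch of the same pattern (illustration only, not taken from this patch):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"

        "github.com/gorilla/mux"
    )

    func main() {
        r := mux.NewRouter()
        // The ":.*" regex lets the variable span "/" so a full image reference is captured.
        r.HandleFunc("/images/{name:.*}/json", func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintln(w, mux.Vars(req)["name"])
        })

        req := httptest.NewRequest(http.MethodGet, "/images/docker.io/library/alpine/json", nil)
        rec := httptest.NewRecorder()
        r.ServeHTTP(rec, req)
        fmt.Print(rec.Body.String()) // prints: docker.io/library/alpine
    }
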
diff --git a/pkg/api/server/register_info.go b/pkg/api/server/register_info.go
index 8c50fed7f..36c467cc3 100644
--- a/pkg/api/server/register_info.go
+++ b/pkg/api/server/register_info.go
@@ -21,6 +21,6 @@ func (s *APIServer) registerInfoHandlers(r *mux.Router) error {
// description: to be determined
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/info"), APIHandler(s.Context, generic.GetInfo)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/info"), s.APIHandler(generic.GetInfo)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_monitor.go b/pkg/api/server/register_monitor.go
index e6c235419..dbe0d27ce 100644
--- a/pkg/api/server/register_monitor.go
+++ b/pkg/api/server/register_monitor.go
@@ -5,7 +5,7 @@ import (
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterMonitorHandlers(r *mux.Router) error {
- r.Handle(VersionedPath("/monitor"), APIHandler(s.Context, handlers.UnsupportedHandler))
+func (s *APIServer) registerMonitorHandlers(r *mux.Router) error {
+ r.Handle(VersionedPath("/monitor"), s.APIHandler(handlers.UnsupportedHandler))
return nil
}
diff --git a/pkg/api/server/register_ping.go b/pkg/api/server/register_ping.go
index 086e674a1..349a8a71a 100644
--- a/pkg/api/server/register_ping.go
+++ b/pkg/api/server/register_ping.go
@@ -9,8 +9,8 @@ import (
func (s *APIServer) registerPingHandlers(r *mux.Router) error {
- r.Handle("/_ping", APIHandler(s.Context, handlers.Ping)).Methods(http.MethodGet)
- r.Handle("/_ping", APIHandler(s.Context, handlers.Ping)).Methods(http.MethodHead)
+ r.Handle("/_ping", s.APIHandler(handlers.Ping)).Methods(http.MethodGet)
+ r.Handle("/_ping", s.APIHandler(handlers.Ping)).Methods(http.MethodHead)
// swagger:operation GET /libpod/_ping libpod libpodPingGet
// ---
@@ -61,7 +61,7 @@ func (s *APIServer) registerPingHandlers(r *mux.Router) error {
// determine if talking to Podman engine or another engine
// 500:
// $ref: "#/responses/InternalError"
- r.Handle("/libpod/_ping", APIHandler(s.Context, handlers.Ping)).Methods(http.MethodGet)
- r.Handle("/libpod/_ping", APIHandler(s.Context, handlers.Ping)).Methods(http.MethodHead)
+ r.Handle("/libpod/_ping", s.APIHandler(handlers.Ping)).Methods(http.MethodGet)
+ r.Handle("/libpod/_ping", s.APIHandler(handlers.Ping)).Methods(http.MethodHead)
return nil
}
diff --git a/pkg/api/server/register_plugins.go b/pkg/api/server/register_plugins.go
index 7fd6b9c4c..479a79d1f 100644
--- a/pkg/api/server/register_plugins.go
+++ b/pkg/api/server/register_plugins.go
@@ -5,7 +5,7 @@ import (
"github.com/gorilla/mux"
)
-func (s *APIServer) RegisterPluginsHandlers(r *mux.Router) error {
- r.Handle(VersionedPath("/plugins"), APIHandler(s.Context, handlers.UnsupportedHandler))
+func (s *APIServer) registerPluginsHandlers(r *mux.Router) error {
+ r.Handle(VersionedPath("/plugins"), s.APIHandler(handlers.UnsupportedHandler))
return nil
}
diff --git a/pkg/api/server/register_pods.go b/pkg/api/server/register_pods.go
index 974568d47..af2330665 100644
--- a/pkg/api/server/register_pods.go
+++ b/pkg/api/server/register_pods.go
@@ -25,8 +25,27 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/BadParamError"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/json"), APIHandler(s.Context, libpod.Pods)).Methods(http.MethodGet)
- r.Handle(VersionedPath("/libpod/pods/create"), APIHandler(s.Context, libpod.PodCreate)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/json"), s.APIHandler(libpod.Pods)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/pods/create pods CreatePod
+ // ---
+ // summary: Create a pod
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: body
+ // name: create
+ // description: attributes for creating a pod
+ // schema:
+ // type: object
+ // $ref: "#/definitions/PodCreateConfig"
+ // responses:
+ // 200:
+ // $ref: "#/definitions/IdResponse"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.Handle(VersionedPath("/libpod/pods/create"), s.APIHandler(libpod.PodCreate)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/prune pods PrunePods
// ---
// summary: Prune unused pods
@@ -45,7 +64,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// description: pod already exists
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/prune"), APIHandler(s.Context, libpod.PodPrune)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/prune"), s.APIHandler(libpod.PodPrune)).Methods(http.MethodPost)
// swagger:operation DELETE /libpod/pods/{name} pods removePod
// ---
// summary: Remove pod
@@ -70,7 +89,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}"), APIHandler(s.Context, libpod.PodDelete)).Methods(http.MethodDelete)
+ r.Handle(VersionedPath("/libpod/pods/{name}"), s.APIHandler(libpod.PodDelete)).Methods(http.MethodDelete)
// swagger:operation GET /libpod/pods/{name}/json pods inspectPod
// ---
// summary: Inspect pod
@@ -89,7 +108,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/json"), APIHandler(s.Context, libpod.PodInspect)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/pods/{name}/json"), s.APIHandler(libpod.PodInspect)).Methods(http.MethodGet)
// swagger:operation GET /libpod/pods/{name}/exists pods podExists
// ---
// summary: Pod exists
@@ -109,7 +128,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/exists"), APIHandler(s.Context, libpod.PodExists)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/libpod/pods/{name}/exists"), s.APIHandler(libpod.PodExists)).Methods(http.MethodGet)
// swagger:operation POST /libpod/pods/{name}/kill pods killPod
// ---
// summary: Kill a pod
@@ -137,7 +156,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/ConflictError"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/kill"), APIHandler(s.Context, libpod.PodKill)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/kill"), s.APIHandler(libpod.PodKill)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/{name}/pause pods pausePod
// ---
// summary: Pause a pod
@@ -157,7 +176,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/pause"), APIHandler(s.Context, libpod.PodPause)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/pause"), s.APIHandler(libpod.PodPause)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/{name}/restart pods restartPod
// ---
// summary: Restart a pod
@@ -176,7 +195,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/restart"), APIHandler(s.Context, libpod.PodRestart)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/restart"), s.APIHandler(libpod.PodRestart)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/{name}/start pods startPod
// ---
// summary: Start a pod
@@ -197,7 +216,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/start"), APIHandler(s.Context, libpod.PodStart)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/start"), s.APIHandler(libpod.PodStart)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/{name}/stop pods stopPod
// ---
// summary: Stop a pod
@@ -224,7 +243,7 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/stop"), APIHandler(s.Context, libpod.PodStop)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/stop"), s.APIHandler(libpod.PodStop)).Methods(http.MethodPost)
// swagger:operation POST /libpod/pods/{name}/unpause pods unpausePod
// ---
// summary: Unpause a pod
@@ -243,6 +262,6 @@ func (s *APIServer) registerPodsHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchPod"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle(VersionedPath("/libpod/pods/{name}/unpause"), APIHandler(s.Context, libpod.PodUnpause)).Methods(http.MethodPost)
+ r.Handle(VersionedPath("/libpod/pods/{name}/unpause"), s.APIHandler(libpod.PodUnpause)).Methods(http.MethodPost)
return nil
}
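
For reference, the newly documented create endpoint can be exercised with a plain HTTP POST once the service is listening on TCP; the body below only guesses at a single PodCreateConfig field ("name"), so consult the swagger definition above for the real schema:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // Hypothetical address and API version segment; adjust to the running service.
        url := "http://localhost:8080/v1.0.0/libpod/pods/create"
        body := bytes.NewBufferString(`{"name": "demo-pod"}`)

        resp, err := http.Post(url, "application/json", body)
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status) // 200 with an IdResponse body on success
    }
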
diff --git a/pkg/api/server/register_swarm.go b/pkg/api/server/register_swarm.go
index 63d8acfde..e37ac4e41 100644
--- a/pkg/api/server/register_swarm.go
+++ b/pkg/api/server/register_swarm.go
@@ -9,7 +9,7 @@ import (
"github.com/sirupsen/logrus"
)
-func (s *APIServer) RegisterSwarmHandlers(r *mux.Router) error {
+func (s *APIServer) registerSwarmHandlers(r *mux.Router) error {
r.PathPrefix("/v{version:[0-9.]+}/configs/").HandlerFunc(noSwarm)
r.PathPrefix("/v{version:[0-9.]+}/nodes/").HandlerFunc(noSwarm)
r.PathPrefix("/v{version:[0-9.]+}/secrets/").HandlerFunc(noSwarm)
diff --git a/pkg/api/server/register_system.go b/pkg/api/server/register_system.go
index f0eaeffd2..188c1cdac 100644
--- a/pkg/api/server/register_system.go
+++ b/pkg/api/server/register_system.go
@@ -1,11 +1,13 @@
package server
import (
+ "net/http"
+
"github.com/containers/libpod/pkg/api/handlers/generic"
"github.com/gorilla/mux"
)
func (s *APIServer) registerSystemHandlers(r *mux.Router) error {
- r.Handle(VersionedPath("/system/df"), APIHandler(s.Context, generic.GetDiskUsage))
+ r.Handle(VersionedPath("/system/df"), s.APIHandler(generic.GetDiskUsage)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_version.go b/pkg/api/server/register_version.go
index d3b47c2a9..ee01ad4b3 100644
--- a/pkg/api/server/register_version.go
+++ b/pkg/api/server/register_version.go
@@ -1,12 +1,14 @@
package server
import (
+ "net/http"
+
"github.com/containers/libpod/pkg/api/handlers"
"github.com/gorilla/mux"
)
func (s *APIServer) registerVersionHandlers(r *mux.Router) error {
- r.Handle("/version", APIHandler(s.Context, handlers.VersionHandler))
- r.Handle(VersionedPath("/version"), APIHandler(s.Context, handlers.VersionHandler))
+ r.Handle("/version", s.APIHandler(handlers.VersionHandler)).Methods(http.MethodGet)
+ r.Handle(VersionedPath("/version"), s.APIHandler(handlers.VersionHandler)).Methods(http.MethodGet)
return nil
}
diff --git a/pkg/api/server/register_volumes.go b/pkg/api/server/register_volumes.go
index d34c71238..efe56a3ad 100644
--- a/pkg/api/server/register_volumes.go
+++ b/pkg/api/server/register_volumes.go
@@ -18,9 +18,9 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error {
// description: tbd
// '500':
// "$ref": "#/responses/InternalError"
- r.Handle("/libpod/volumes/create", APIHandler(s.Context, libpod.CreateVolume)).Methods(http.MethodPost)
- r.Handle("/libpod/volumes/json", APIHandler(s.Context, libpod.ListVolumes)).Methods(http.MethodGet)
- // swagger:operation POST /volumes/prune volumes pruneVolumes
+ r.Handle("/libpod/volumes/create", s.APIHandler(libpod.CreateVolume)).Methods(http.MethodPost)
+ r.Handle("/libpod/volumes/json", s.APIHandler(libpod.ListVolumes)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/volumes/prune volumes pruneVolumes
// ---
// summary: Prune volumes
// produces:
@@ -30,8 +30,8 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error {
// description: no error
// '500':
// "$ref": "#/responses/InternalError"
- r.Handle("/libpod/volumes/prune", APIHandler(s.Context, libpod.PruneVolumes)).Methods(http.MethodPost)
- // swagger:operation GET /volumes/{name}/json volumes inspectVolume
+ r.Handle("/libpod/volumes/prune", s.APIHandler(libpod.PruneVolumes)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/volumes/{name}/json volumes inspectVolume
// ---
// summary: Inspect volume
// parameters:
@@ -49,8 +49,8 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error {
// "$ref": "#/responses/NoSuchVolume"
// '500':
// "$ref": "#/responses/InternalError"
- r.Handle("/libpod/volumes/{name}/json", APIHandler(s.Context, libpod.InspectVolume)).Methods(http.MethodGet)
- // swagger:operation DELETE /volumes/{name} volumes removeVolume
+ r.Handle("/libpod/volumes/{name}/json", s.APIHandler(libpod.InspectVolume)).Methods(http.MethodGet)
+ // swagger:operation DELETE /libpod/volumes/{name} volumes removeVolume
// ---
// summary: Remove volume
// parameters:
@@ -74,6 +74,6 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchVolume"
// 500:
// $ref: "#/responses/InternalError"
- r.Handle("/libpod/volumes/{name}", APIHandler(s.Context, libpod.RemoveVolume)).Methods(http.MethodDelete)
+ r.Handle("/libpod/volumes/{name}", s.APIHandler(libpod.RemoveVolume)).Methods(http.MethodDelete)
return nil
}
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 2c709bc59..e7b2a5525 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -20,20 +20,26 @@ import (
)
type APIServer struct {
- http.Server // The HTTP work happens here
- *schema.Decoder // Decoder for Query parameters to structs
- context.Context // Context to carry objects to handlers
- *libpod.Runtime // Where the real work happens
- net.Listener // mux for routing HTTP API calls to libpod routines
- context.CancelFunc // Stop APIServer
- *time.Timer // Hold timer for sliding window
- time.Duration // Duration of client access sliding window
+ http.Server // The HTTP work happens here
+ *schema.Decoder // Decoder for Query parameters to structs
+ context.Context // Context to carry objects to handlers
+ *libpod.Runtime // Where the real work happens
+ net.Listener // mux for routing HTTP API calls to libpod routines
+ context.CancelFunc // Stop APIServer
+ *time.Timer // Hold timer for sliding window
+ time.Duration // Duration of client access sliding window
+ ActiveConnections uint64 // Number of handlers holding a connection
+ TotalConnections uint64 // Number of connections handled
+ ConnectionCh chan int // Channel for signalling handler enter/exit
}
// Number of seconds to wait for next request, if exceeded shutdown server
const (
DefaultServiceDuration = 300 * time.Second
UnlimitedServiceDuration = 0 * time.Second
+ EnterHandler = 1
+ ExitHandler = -1
+ NOOPHandler = 0
)
// NewServer will create and configure a new API server with all defaults
@@ -68,31 +74,19 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
Server: http.Server{
Handler: router,
ReadHeaderTimeout: 20 * time.Second,
- ReadTimeout: 20 * time.Second,
- WriteTimeout: 2 * time.Minute,
+ IdleTimeout: duration,
},
- Decoder: handlers.NewAPIDecoder(),
- Context: nil,
- Runtime: runtime,
- Listener: *listener,
- CancelFunc: nil,
- Duration: duration,
+ Decoder: handlers.NewAPIDecoder(),
+ Runtime: runtime,
+ Listener: *listener,
+ Duration: duration,
+ ConnectionCh: make(chan int),
}
+
server.Timer = time.AfterFunc(server.Duration, func() {
- if err := server.Shutdown(); err != nil {
- logrus.Errorf("unable to shutdown server: %q", err)
- }
+ server.ConnectionCh <- NOOPHandler
})
- ctx, cancelFn := context.WithCancel(context.Background())
- server.CancelFunc = cancelFn
-
- // TODO: Use ConnContext when ported to go 1.13
- ctx = context.WithValue(ctx, "decoder", server.Decoder)
- ctx = context.WithValue(ctx, "runtime", runtime)
- ctx = context.WithValue(ctx, "shutdownFunc", server.Shutdown)
- server.Context = ctx
-
router.NotFoundHandler = http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
// We can track user errors...
@@ -102,20 +96,19 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
)
for _, fn := range []func(*mux.Router) error{
- server.RegisterAuthHandlers,
- server.RegisterContainersHandlers,
- server.RegisterDistributionHandlers,
+ server.registerAuthHandlers,
+ server.registerContainersHandlers,
+ server.registerDistributionHandlers,
server.registerExecHandlers,
- server.RegisterEventsHandlers,
+ server.registerEventsHandlers,
server.registerHealthCheckHandlers,
server.registerImagesHandlers,
server.registerInfoHandlers,
- server.RegisterMonitorHandlers,
+ server.registerMonitorHandlers,
server.registerPingHandlers,
- server.RegisterPluginsHandlers,
+ server.registerPluginsHandlers,
server.registerPodsHandlers,
- server.RegisterSwaggerHandlers,
- server.RegisterSwarmHandlers,
+ server.registerSwarmHandlers,
server.registerSystemHandlers,
server.registerVersionHandlers,
server.registerVolumeHandlers,
@@ -145,7 +138,41 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
// Serve starts responding to HTTP requests
func (s *APIServer) Serve() error {
- defer s.CancelFunc()
+ // stalker to count the connections. Should the timer expire, it will shut down the service.
+ go func() {
+ for {
+ select {
+ case delta := <-s.ConnectionCh:
+ // Always stop the current timer, things will change...
+ s.Timer.Stop()
+ switch delta {
+ case EnterHandler:
+ s.ActiveConnections += 1
+ s.TotalConnections += 1
+ case ExitHandler:
+ s.ActiveConnections -= 1
+ if s.ActiveConnections == 0 {
+ // Server will be shut down iff the timer expires before being reset or stopped
+ s.Timer = time.AfterFunc(s.Duration, func() {
+ if err := s.Shutdown(); err != nil {
+ logrus.Errorf("Failed to shutdown APIServer: %v", err)
+ os.Exit(1)
+ }
+ })
+ } else {
+ s.Timer.Reset(s.Duration)
+ }
+ case NOOPHandler:
+ // push the check out another duration...
+ s.Timer.Reset(s.Duration)
+ default:
+ logrus.Errorf("ConnectionCh received unsupported input %d", delta)
+ }
+ default:
+ time.Sleep(1 * time.Second)
+ }
+ }
+ }()
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
@@ -155,6 +182,7 @@ func (s *APIServer) Serve() error {
err := s.Server.Serve(s.Listener)
if err != nil && err != http.ErrServerClosed {
errChan <- errors.Wrap(err, "Failed to start APIServer")
+ return
}
errChan <- nil
}()
@@ -171,27 +199,23 @@ func (s *APIServer) Serve() error {
// Shutdown is a clean shutdown waiting on existing clients
func (s *APIServer) Shutdown() error {
- // Duration == 0 flags no auto-shutdown of server
+ // Duration == 0 flags no auto-shutdown of the server
if s.Duration == 0 {
+ logrus.Debug("APIServer.Shutdown ignored as Duration == 0")
return nil
}
+ logrus.Debugf("APIServer.Shutdown called %v, conn %d/%d", time.Now(), s.ActiveConnections, s.TotalConnections)
- // We're still in the sliding service window
- if s.Timer.Stop() {
- s.Timer.Reset(s.Duration)
- return nil
- }
+ // Gracefully shutdown server
+ ctx, cancel := context.WithTimeout(context.Background(), s.Duration)
+ defer cancel()
- // We've been idle for the service window, really shutdown
go func() {
- err := s.Server.Shutdown(s.Context)
+ err := s.Server.Shutdown(ctx)
if err != nil && err != context.Canceled {
logrus.Errorf("Failed to cleanly shutdown APIServer: %s", err.Error())
}
}()
-
- // Wait for graceful shutdown vs. just killing connections and dropping data
- <-s.Context.Done()
return nil
}
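
The accounting goroutine above only sees traffic if every handler reports itself on ConnectionCh; that wiring lives in the s.APIHandler method (defined elsewhere in this patch, not shown in this hunk). A self-contained sketch of the idea, with names invented for illustration rather than taken from that code:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    const (
        enterHandler = 1  // mirrors EnterHandler
        exitHandler  = -1 // mirrors ExitHandler
    )

    // tracker mimics the APIServer connection accounting in miniature.
    type tracker struct {
        connCh chan int
    }

    // wrap brackets each request with enter/exit messages, the same shape the
    // idle-timer goroutine expects to receive on ConnectionCh.
    func (t *tracker) wrap(h http.HandlerFunc) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            t.connCh <- enterHandler
            defer func() { t.connCh <- exitHandler }()
            h(w, r)
        }
    }

    func main() {
        t := &tracker{connCh: make(chan int, 2)}
        h := t.wrap(func(w http.ResponseWriter, r *http.Request) {})
        h(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/_ping", nil))
        fmt.Println(<-t.connCh, <-t.connCh) // 1 -1
    }
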
diff --git a/pkg/bindings/containers/create.go b/pkg/bindings/containers/create.go
new file mode 100644
index 000000000..18b32335b
--- /dev/null
+++ b/pkg/bindings/containers/create.go
@@ -0,0 +1,30 @@
+package containers
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/specgen"
+ jsoniter "github.com/json-iterator/go"
+)
+
+func CreateWithSpec(ctx context.Context, s specgen.SpecGenerator) (utils.ContainerCreateResponse, error) {
+ var ccr utils.ContainerCreateResponse
+ conn, err := bindings.GetConnectionFromContext(ctx)
+ if err != nil {
+ return ccr, err
+ }
+ specgenString, err := jsoniter.MarshalToString(s)
+ if err != nil {
+ return ccr, err
+ }
+ stringReader := strings.NewReader(specgenString)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/containers/create", nil)
+ if err != nil {
+ return ccr, err
+ }
+ return ccr, response.Process(&ccr)
+}
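
A sketch of how a client might drive the new binding; bindings.NewConnection is assumed here as the helper that produces a connection-carrying context (the connection plumbing is outside this file), and the response is printed generically rather than assuming its field names:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/bindings"
        "github.com/containers/libpod/pkg/bindings/containers"
        "github.com/containers/libpod/pkg/specgen"
    )

    func main() {
        // Assumption: a helper like bindings.NewConnection returns a context that
        // GetConnectionFromContext can later unpack.
        ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
        if err != nil {
            fmt.Println(err)
            return
        }

        var s specgen.SpecGenerator
        s.Image = "docker.io/library/alpine"
        s.Command = []string{"ls"}

        resp, err := containers.CreateWithSpec(ctx, s)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("%+v\n", resp)
    }
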
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index 182a39f6b..f71d55776 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -452,6 +452,7 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st
var lastErr error
var pausePid int
+ foundProcess := false
for _, path := range paths {
if !needNewNamespace {
@@ -502,12 +503,16 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st
}
pausePid, err = strconv.Atoi(string(b[:n]))
- if err == nil {
+ if err == nil && unix.Kill(pausePid, 0) == nil {
+ foundProcess = true
lastErr = nil
break
}
}
}
+ if !foundProcess {
+ return BecomeRootInUserNS(pausePidPath)
+ }
if lastErr != nil {
return false, 0, lastErr
}
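
The added unix.Kill(pausePid, 0) check is the classic "signal 0" probe: nothing is delivered, but the call reports whether the recorded PID still refers to a live, signalable process, so a stale pause-PID file no longer prevents the fallback to BecomeRootInUserNS. The idiom in isolation:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Signal 0 performs existence/permission checks without sending a signal,
        // so a nil error means the process is still there.
        self := os.Getpid()
        if err := unix.Kill(self, 0); err == nil {
            fmt.Printf("process %d is alive\n", self)
        }

        // A PID that is extremely unlikely to exist typically yields ESRCH.
        if err := unix.Kill(1<<22+11, 0); err != nil {
            fmt.Println("stale pid:", err)
        }
    }
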
diff --git a/pkg/seccomp/seccomp.go b/pkg/seccomp/seccomp.go
index dcf255378..eeba46a72 100644
--- a/pkg/seccomp/seccomp.go
+++ b/pkg/seccomp/seccomp.go
@@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
)
-// ContianerImageLabel is the key of the image annotation embedding a seccomp
+// ContainerImageLabel is the key of the image annotation embedding a seccomp
// profile.
const ContainerImageLabel = "io.containers.seccomp.profile"
diff --git a/pkg/spec/config_linux.go b/pkg/spec/config_linux.go
index 5f39b6d0d..544c0020d 100644
--- a/pkg/spec/config_linux.go
+++ b/pkg/spec/config_linux.go
@@ -32,8 +32,8 @@ func Device(d *configs.Device) spec.LinuxDevice {
}
}
-// devicesFromPath computes a list of devices
-func devicesFromPath(g *generate.Generator, devicePath string) error {
+// DevicesFromPath computes a list of devices
+func DevicesFromPath(g *generate.Generator, devicePath string) error {
devs := strings.Split(devicePath, ":")
resolvedDevicePath := devs[0]
// check if it is a symbolic link
@@ -216,7 +216,7 @@ func getDevices(path string) ([]*configs.Device, error) {
return out, nil
}
-func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
+func addPrivilegedDevices(g *generate.Generator) error {
hostDevices, err := getDevices("/dev")
if err != nil {
return err
@@ -280,16 +280,16 @@ func (c *CreateConfig) createBlockIO() (*spec.LinuxBlockIO, error) {
var lwds []spec.LinuxWeightDevice
ret = bio
for _, i := range c.Resources.BlkioWeightDevice {
- wd, err := validateweightDevice(i)
+ wd, err := ValidateweightDevice(i)
if err != nil {
return ret, errors.Wrapf(err, "invalid values for blkio-weight-device")
}
- wdStat, err := getStatFromPath(wd.path)
+ wdStat, err := GetStatFromPath(wd.Path)
if err != nil {
- return ret, errors.Wrapf(err, "error getting stat from path %q", wd.path)
+ return ret, errors.Wrapf(err, "error getting stat from path %q", wd.Path)
}
lwd := spec.LinuxWeightDevice{
- Weight: &wd.weight,
+ Weight: &wd.Weight,
}
lwd.Major = int64(unix.Major(wdStat.Rdev))
lwd.Minor = int64(unix.Minor(wdStat.Rdev))
@@ -347,7 +347,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
if err != nil {
return []spec.LinuxThrottleDevice{}, err
}
- ltdStat, err := getStatFromPath(t.path)
+ ltdStat, err := GetStatFromPath(t.path)
if err != nil {
return ltds, errors.Wrapf(err, "error getting stat from path %q", t.path)
}
@@ -361,7 +361,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
return ltds, nil
}
-func getStatFromPath(path string) (unix.Stat_t, error) {
+func GetStatFromPath(path string) (unix.Stat_t, error) {
s := unix.Stat_t{}
err := unix.Stat(path, &s)
return s, err
diff --git a/pkg/spec/config_unsupported.go b/pkg/spec/config_unsupported.go
index be3e7046d..568afde55 100644
--- a/pkg/spec/config_unsupported.go
+++ b/pkg/spec/config_unsupported.go
@@ -15,7 +15,7 @@ func addDevice(g *generate.Generator, device string) error {
return errors.New("function not implemented")
}
-func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
+func addPrivilegedDevices(g *generate.Generator) error {
return errors.New("function not implemented")
}
@@ -27,7 +27,7 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
return nil, errors.New("function not implemented")
}
-func devicesFromPath(g *generate.Generator, devicePath string) error {
+func DevicesFromPath(g *generate.Generator, devicePath string) error {
return errors.New("function not implemented")
}
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 8010be0d4..5011df496 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -126,6 +126,7 @@ type SecurityConfig struct {
}
// CreateConfig is a pre OCI spec structure. It represents user input from varlink or the CLI
+// swagger:model CreateConfig
type CreateConfig struct {
Annotations map[string]string
Args []string
@@ -386,6 +387,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
// AddPrivilegedDevices iterates through host devices and adds all
// host devices to the spec
-func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error {
- return c.addPrivilegedDevices(g)
+func AddPrivilegedDevices(g *generate.Generator) error {
+ return addPrivilegedDevices(g)
}
diff --git a/pkg/spec/parse.go b/pkg/spec/parse.go
index a5dfccdb9..38d93b87f 100644
--- a/pkg/spec/parse.go
+++ b/pkg/spec/parse.go
@@ -19,12 +19,12 @@ const Pod = "pod"
// weightDevice is a structure that holds device:weight pair
type weightDevice struct {
- path string
- weight uint16
+ Path string
+ Weight uint16
}
func (w *weightDevice) String() string {
- return fmt.Sprintf("%s:%d", w.path, w.weight)
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// LinuxNS is a struct that contains namespace information
@@ -59,9 +59,9 @@ func NS(s string) string {
return ""
}
-// validateweightDevice validates that the specified string has a valid device-weight format
+// ValidateweightDevice validates that the specified string has a valid device-weight format
// for blkio-weight-device flag
-func validateweightDevice(val string) (*weightDevice, error) {
+func ValidateweightDevice(val string) (*weightDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
@@ -78,8 +78,8 @@ func validateweightDevice(val string) (*weightDevice, error) {
}
return &weightDevice{
- path: split[0],
- weight: uint16(weight),
+ Path: split[0],
+ Weight: uint16(weight),
}, nil
}
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index b2a152a2d..a4ae22efd 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -16,9 +16,9 @@ import (
"github.com/pkg/errors"
)
-const cpuPeriod = 100000
+const CpuPeriod = 100000
-func getAvailableGids() (int64, error) {
+func GetAvailableGids() (int64, error) {
idMap, err := user.ParseIDMapFile("/proc/self/gid_map")
if err != nil {
return 0, err
@@ -80,7 +80,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
gid5Available := true
if isRootless {
- nGids, err := getAvailableGids()
+ nGids, err := GetAvailableGids()
if err != nil {
return nil, err
}
@@ -197,8 +197,8 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
addedResources = true
}
if config.Resources.CPUs != 0 {
- g.SetLinuxResourcesCPUPeriod(cpuPeriod)
- g.SetLinuxResourcesCPUQuota(int64(config.Resources.CPUs * cpuPeriod))
+ g.SetLinuxResourcesCPUPeriod(CpuPeriod)
+ g.SetLinuxResourcesCPUQuota(int64(config.Resources.CPUs * CpuPeriod))
addedResources = true
}
if config.Resources.CPURtRuntime != 0 {
@@ -223,12 +223,12 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
// If privileged, we need to add all the host devices to the
// spec. We do not add the user provided ones because we are
// already adding them all.
- if err := config.AddPrivilegedDevices(&g); err != nil {
+ if err := AddPrivilegedDevices(&g); err != nil {
return nil, err
}
} else {
for _, devicePath := range config.Devices {
- if err := devicesFromPath(&g, devicePath); err != nil {
+ if err := DevicesFromPath(&g, devicePath); err != nil {
return nil, err
}
}
@@ -241,23 +241,35 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// SECURITY OPTS
+ var runtimeConfig *libpodconfig.Config
+
+ if runtime != nil {
+ runtimeConfig, err = runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ }
+
g.SetProcessNoNewPrivileges(config.Security.NoNewPrivs)
if !config.Security.Privileged {
g.SetProcessApparmorProfile(config.Security.ApparmorProfile)
}
- blockAccessToKernelFilesystems(config, &g)
-
- var runtimeConfig *libpodconfig.Config
-
- if runtime != nil {
- runtimeConfig, err = runtime.GetConfig()
- if err != nil {
+ // Unless already set via the CLI, check if we need to disable process
+ // labels or set the defaults.
+ if len(config.Security.LabelOpts) == 0 && runtimeConfig != nil {
+ if !runtimeConfig.EnableLabeling {
+ // Disabled in the config.
+ config.Security.LabelOpts = append(config.Security.LabelOpts, "disable")
+ } else if err := config.Security.SetLabelOpts(runtime, &config.Pid, &config.Ipc); err != nil {
+ // Defaults!
return nil, err
}
}
+ BlockAccessToKernelFilesystems(config.Security.Privileged, config.Pid.PidMode.IsHost(), &g)
+
// RESOURCES - PIDS
if config.Resources.PidsLimit > 0 {
// if running on rootless on a cgroupv1 machine or using the cgroupfs manager, pids
@@ -320,9 +332,9 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// BIND MOUNTS
- configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts)
+ configSpec.Mounts = SupercedeUserMounts(userMounts, configSpec.Mounts)
// Process mounts to ensure correct options
- finalMounts, err := initFSMounts(configSpec.Mounts)
+ finalMounts, err := InitFSMounts(configSpec.Mounts)
if err != nil {
return nil, err
}
@@ -404,8 +416,8 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
return configSpec, nil
}
-func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
- if !config.Security.Privileged {
+func BlockAccessToKernelFilesystems(privileged, pidModeIsHost bool, g *generate.Generator) {
+ if !privileged {
for _, mp := range []string{
"/proc/acpi",
"/proc/kcore",
@@ -421,7 +433,7 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
g.AddLinuxMaskedPaths(mp)
}
- if config.Pid.PidMode.IsHost() && rootless.IsRootless() {
+ if pidModeIsHost && rootless.IsRootless() {
return
}
diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go
index 0e2098c1d..e37fa2451 100644
--- a/pkg/spec/storage.go
+++ b/pkg/spec/storage.go
@@ -825,7 +825,7 @@ func (config *CreateConfig) addContainerInitBinary(path string) (spec.Mount, err
// TODO: Should we unmount subtree mounts? E.g., if /tmp/ is mounted by
// one mount, and we already have /tmp/a and /tmp/b, should we remove
// the /tmp/a and /tmp/b mounts in favor of the more general /tmp?
-func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
+func SupercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
if len(mounts) > 0 {
// If we have overlapping mounts, remove them from the spec in favor of
// the user-added volume mounts
@@ -854,7 +854,7 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
}
// Ensure mount options on all mounts are correct
-func initFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
+func InitFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
// We need to look up mounts so we can figure out the proper mount flags
// to apply.
systemMounts, err := pmount.GetMounts()
diff --git a/pkg/specgen/config_linux_cgo.go b/pkg/specgen/config_linux_cgo.go
new file mode 100644
index 000000000..6f547a40d
--- /dev/null
+++ b/pkg/specgen/config_linux_cgo.go
@@ -0,0 +1,62 @@
+// +build linux,cgo
+
+package specgen
+
+import (
+ "context"
+ "io/ioutil"
+
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/seccomp"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ goSeccomp "github.com/seccomp/containers-golang"
+ "github.com/sirupsen/logrus"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
+ var seccompConfig *spec.LinuxSeccomp
+ var err error
+
+ scp, err := seccomp.LookupPolicy(s.SeccompPolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ if scp == seccomp.PolicyImage {
+ labels, err := img.Labels(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ imagePolicy := labels[seccomp.ContainerImageLabel]
+ if len(imagePolicy) < 1 {
+ return nil, errors.New("no seccomp policy defined by image")
+ }
+ logrus.Debug("Loading seccomp profile from the security config")
+ seccompConfig, err = goSeccomp.LoadProfile(imagePolicy, configSpec)
+ if err != nil {
+ return nil, errors.Wrap(err, "loading seccomp profile failed")
+ }
+ return seccompConfig, nil
+ }
+
+ if s.SeccompProfilePath != "" {
+ logrus.Debugf("Loading seccomp profile from %q", s.SeccompProfilePath)
+ seccompProfile, err := ioutil.ReadFile(s.SeccompProfilePath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "opening seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ seccompConfig, err = goSeccomp.LoadProfile(string(seccompProfile), configSpec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ } else {
+ logrus.Debug("Loading default seccomp profile")
+ seccompConfig, err = goSeccomp.GetDefaultProfile(configSpec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", s.SeccompProfilePath)
+ }
+ }
+
+ return seccompConfig, nil
+}
diff --git a/pkg/specgen/config_linux_nocgo.go b/pkg/specgen/config_linux_nocgo.go
new file mode 100644
index 000000000..fc0c58c37
--- /dev/null
+++ b/pkg/specgen/config_linux_nocgo.go
@@ -0,0 +1,12 @@
+// +build linux,!cgo
+
+package specgen
+
+import (
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
+ return nil, nil
+}
diff --git a/pkg/specgen/config_unsupported.go b/pkg/specgen/config_unsupported.go
new file mode 100644
index 000000000..5d24ac39c
--- /dev/null
+++ b/pkg/specgen/config_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package specgen
+
+import (
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
+ return nil, errors.New("function not supported on non-linux OS's")
+}
diff --git a/pkg/specgen/create.go b/pkg/specgen/create.go
new file mode 100644
index 000000000..c8fee5f05
--- /dev/null
+++ b/pkg/specgen/create.go
@@ -0,0 +1,187 @@
+package specgen
+
+import (
+ "context"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/config"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "os"
+)
+
+// MakeContainer creates a container based on the SpecGenerator
+func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, error) {
+ var pod *libpod.Pod
+ if err := s.validate(rt); err != nil {
+ return nil, errors.Wrap(err, "invalid config provided")
+ }
+ rtc, err := rt.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ options, err := s.createContainerOptions(rt, pod)
+ if err != nil {
+ return nil, err
+ }
+
+ podmanPath, err := os.Executable()
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, s.createExitCommandOption(rtc, podmanPath))
+ newImage, err := rt.ImageRuntime().NewFromLocal(s.Image)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO mheon wants to talk with Dan about this
+ useImageVolumes := s.ImageVolumeMode == "bind"
+ options = append(options, libpod.WithRootFSFromImage(newImage.ID(), s.Image, useImageVolumes))
+
+ runtimeSpec, err := s.toOCISpec(rt, newImage)
+ if err != nil {
+ return nil, err
+ }
+ return rt.NewContainer(context.Background(), runtimeSpec, options...)
+}
+
+func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) {
+ var options []libpod.CtrCreateOption
+ var err error
+
+ if s.Stdin {
+ options = append(options, libpod.WithStdin())
+ }
+ if len(s.Systemd) > 0 {
+ options = append(options, libpod.WithSystemd())
+ }
+ if len(s.Name) > 0 {
+ logrus.Debugf("setting container name %s", s.Name)
+ options = append(options, libpod.WithName(s.Name))
+ }
+ if s.Pod != "" {
+ logrus.Debugf("adding container to pod %s", s.Pod)
+ options = append(options, rt.WithPod(pod))
+ }
+ destinations := []string{}
+ // Take all mount and named volume destinations.
+ for _, mount := range s.Mounts {
+ destinations = append(destinations, mount.Destination)
+ }
+ for _, volume := range s.Volumes {
+ destinations = append(destinations, volume.Dest)
+ }
+ options = append(options, libpod.WithUserVolumes(destinations))
+
+ if len(s.Volumes) != 0 {
+ options = append(options, libpod.WithNamedVolumes(s.Volumes))
+ }
+
+ if len(s.Command) != 0 {
+ options = append(options, libpod.WithCommand(s.Command))
+ }
+
+ options = append(options, libpod.WithEntrypoint(s.Entrypoint))
+ if s.StopSignal != nil {
+ options = append(options, libpod.WithStopSignal(*s.StopSignal))
+ }
+ if s.StopTimeout != nil {
+ options = append(options, libpod.WithStopTimeout(*s.StopTimeout))
+ }
+ if s.LogConfiguration != nil {
+ if len(s.LogConfiguration.Path) > 0 {
+ options = append(options, libpod.WithLogPath(s.LogConfiguration.Path))
+ }
+ if len(s.LogConfiguration.Options) > 0 && s.LogConfiguration.Options["tag"] != "" {
+ // Note: I'm really guessing here.
+ options = append(options, libpod.WithLogTag(s.LogConfiguration.Options["tag"]))
+ }
+
+ if len(s.LogConfiguration.Driver) > 0 {
+ options = append(options, libpod.WithLogDriver(s.LogConfiguration.Driver))
+ }
+ }
+
+ // Security options
+ if len(s.SelinuxOpts) > 0 {
+ options = append(options, libpod.WithSecLabels(s.SelinuxOpts))
+ }
+ options = append(options, libpod.WithPrivileged(s.Privileged))
+
+ // Get namespace related options
+ namespaceOptions, err := s.generateNamespaceContainerOpts(rt)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, namespaceOptions...)
+
+ // TODO NetworkNS still needs to be done!
+ if len(s.ConmonPidFile) > 0 {
+ options = append(options, libpod.WithConmonPidFile(s.ConmonPidFile))
+ }
+ options = append(options, libpod.WithLabels(s.Labels))
+ if s.ShmSize != nil {
+ options = append(options, libpod.WithShmSize(*s.ShmSize))
+ }
+ if s.Rootfs != "" {
+ options = append(options, libpod.WithRootFS(s.Rootfs))
+ }
+ // Default used if not overridden on command line
+
+ if s.RestartPolicy != "" {
+ if s.RestartPolicy == "unless-stopped" {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "the unless-stopped restart policy is not supported")
+ }
+ if s.RestartRetries != nil {
+ options = append(options, libpod.WithRestartRetries(*s.RestartRetries))
+ }
+ options = append(options, libpod.WithRestartPolicy(s.RestartPolicy))
+ }
+
+ if s.ContainerHealthCheckConfig.HealthConfig != nil {
+ options = append(options, libpod.WithHealthCheck(s.ContainerHealthCheckConfig.HealthConfig))
+ logrus.Debugf("New container has a health check")
+ }
+ return options, nil
+}
+
+func (s *SpecGenerator) createExitCommandOption(config *config.Config, podmanPath string) libpod.CtrCreateOption {
+ // We need a cleanup process for containers in the current model.
+ // But we can't assume that the caller is Podman - it could be another
+ // user of the API.
+ // As such, provide a way to specify a path to Podman, so we can
+ // still invoke a cleanup process.
+
+ command := []string{podmanPath,
+ "--root", config.StorageConfig.GraphRoot,
+ "--runroot", config.StorageConfig.RunRoot,
+ "--log-level", logrus.GetLevel().String(),
+ "--cgroup-manager", config.CgroupManager,
+ "--tmpdir", config.TmpDir,
+ }
+ if config.OCIRuntime != "" {
+ command = append(command, []string{"--runtime", config.OCIRuntime}...)
+ }
+ if config.StorageConfig.GraphDriverName != "" {
+ command = append(command, []string{"--storage-driver", config.StorageConfig.GraphDriverName}...)
+ }
+ for _, opt := range config.StorageConfig.GraphDriverOptions {
+ command = append(command, []string{"--storage-opt", opt}...)
+ }
+ if config.EventsLogger != "" {
+ command = append(command, []string{"--events-backend", config.EventsLogger}...)
+ }
+
+ // TODO Mheon wants to leave this for now
+ //if s.sys {
+ // command = append(command, "--syslog", "true")
+ //}
+ command = append(command, []string{"container", "cleanup"}...)
+
+ if s.Remove {
+ command = append(command, "--rm")
+ }
+ return libpod.WithExitCommand(command)
+}
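
The intended call shape, then, is a caller-populated SpecGenerator handed to MakeContainer together with an existing runtime; a rough sketch, with the runtime acquisition assumed rather than taken from this patch (the API handlers already hold a configured *libpod.Runtime):

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/libpod"
        "github.com/containers/libpod/pkg/specgen"
    )

    func main() {
        // Assumption: a default-option runtime is good enough for the sketch.
        rt, err := libpod.NewRuntime(context.Background())
        if err != nil {
            fmt.Println(err)
            return
        }

        var s specgen.SpecGenerator
        s.Name = "specgen-demo"
        s.Image = "docker.io/library/alpine"
        s.Command = []string{"echo", "hello"}

        ctr, err := s.MakeContainer(rt)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("created:", ctr.ID())
    }
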
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
new file mode 100644
index 000000000..025cb31e0
--- /dev/null
+++ b/pkg/specgen/namespaces.go
@@ -0,0 +1,467 @@
+package specgen
+
+import (
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/capabilities"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type NamespaceMode string
+
+const (
+ // Host means the namespace is derived from
+ // the host
+ Host NamespaceMode = "host"
+ // Path is the path to a namespace
+ Path NamespaceMode = "path"
+ // FromContainer means namespace is derived from a
+ // different container
+ FromContainer NamespaceMode = "container"
+ // FromPod indicates the namespace is derived from a pod
+ FromPod NamespaceMode = "pod"
+ // Private indicates the namespace is private
+ Private NamespaceMode = "private"
+ // NoNetwork indicates no network namespace should
+ // be joined. Loopback should still exist
+ NoNetwork NamespaceMode = "none"
+ // Bridge indicates that a CNI network stack
+ // should be used
+ Bridge NamespaceMode = "bridge"
+ // Slirp indicates that a slirp4ns network stack should
+ // be used
+ Slirp NamespaceMode = "slirp4ns"
+)
+
+// Namespace describes the namespace
+type Namespace struct {
+ NSMode NamespaceMode `json:"nsmode,omitempty"`
+ Value string `json:"string,omitempty"`
+}
+
+// IsHost returns a bool if the namespace is host based
+func (n *Namespace) IsHost() bool {
+ return n.NSMode == Host
+}
+
+// IsPath indicates via bool if the namespace is based on a path
+func (n *Namespace) IsPath() bool {
+ return n.NSMode == Path
+}
+
+// IsContainer indicates via bool if the namespace is based on a container
+func (n *Namespace) IsContainer() bool {
+ return n.NSMode == FromContainer
+}
+
+// IsPod indicates via bool if the namespace is based on a pod
+func (n *Namespace) IsPod() bool {
+ return n.NSMode == FromPod
+}
+
+// IsPrivate indicates the namespace is private
+func (n *Namespace) IsPrivate() bool {
+ return n.NSMode == Private
+}
+
+// validate perform simple validation on the namespace to make sure it is not
+// invalid from the get-go
+func (n *Namespace) validate() error {
+ if n == nil {
+ return nil
+ }
+ switch n.NSMode {
+ case Host, Path, FromContainer, FromPod, Private, NoNetwork, Bridge, Slirp:
+ break
+ default:
+ return errors.Errorf("invalid network %q", n.NSMode)
+ }
+ // Path and From Container MUST have a string value set
+ if n.NSMode == Path || n.NSMode == FromContainer {
+ if len(n.Value) < 1 {
+ return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+ }
+ } else {
+ // All others must NOT set a string value
+ if len(n.Value) > 0 {
+ return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+ }
+ }
+ return nil
+}
+
+func (s *SpecGenerator) generateNamespaceContainerOpts(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ var portBindings []ocicni.PortMapping
+ options := make([]libpod.CtrCreateOption, 0)
+
+ // Cgroups
+ switch {
+ case s.CgroupNS.IsPrivate():
+ ns := s.CgroupNS.Value
+ if _, err := os.Stat(ns); err != nil {
+ return nil, err
+ }
+ case s.CgroupNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.CgroupNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.CgroupNS.Value)
+ }
+ options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ // TODO
+ //default:
+ // return nil, errors.New("cgroup name only supports private and container")
+ }
+
+ if s.CgroupParent != "" {
+ options = append(options, libpod.WithCgroupParent(s.CgroupParent))
+ }
+
+ if s.CgroupsMode != "" {
+ options = append(options, libpod.WithCgroupsMode(s.CgroupsMode))
+ }
+
+ // ipc
+ switch {
+ case s.IpcNS.IsHost():
+ options = append(options, libpod.WithShmDir("/dev/shm"))
+ case s.IpcNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.IpcNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.IpcNS.Value)
+ }
+ options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ options = append(options, libpod.WithShmDir(connectedCtr.ShmDir()))
+ }
+
+ // pid
+ if s.PidNS.IsContainer() {
+ connectedCtr, err := rt.LookupContainer(s.PidNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.PidNS.Value)
+ }
+ options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ }
+
+ // uts
+ switch {
+ case s.UtsNS.IsPod():
+ connectedPod, err := rt.LookupPod(s.UtsNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "pod %q not found", s.UtsNS.Value)
+ }
+ options = append(options, libpod.WithUTSNSFromPod(connectedPod))
+ case s.UtsNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.UtsNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.UtsNS.Value)
+ }
+
+ options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ }
+
+ if s.UseImageHosts {
+ options = append(options, libpod.WithUseImageHosts())
+ } else if len(s.HostAdd) > 0 {
+ options = append(options, libpod.WithHosts(s.HostAdd))
+ }
+
+ // User
+
+ switch {
+ case s.UserNS.IsPath():
+ ns := s.UserNS.Value
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined user namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ if s.IDMappings != nil {
+ options = append(options, libpod.WithIDMappings(*s.IDMappings))
+ }
+ case s.UserNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.UserNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.UserNS.Value)
+ }
+ options = append(options, libpod.WithUserNSFrom(connectedCtr))
+ default:
+ if s.IDMappings != nil {
+ options = append(options, libpod.WithIDMappings(*s.IDMappings))
+ }
+ }
+
+ options = append(options, libpod.WithUser(s.User))
+ options = append(options, libpod.WithGroups(s.Groups))
+
+ if len(s.PortMappings) > 0 {
+ portBindings = s.PortMappings
+ }
+
+ switch {
+ case s.NetNS.IsPath():
+ ns := s.NetNS.Value
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined network namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ case s.NetNS.IsContainer():
+ connectedCtr, err := rt.LookupContainer(s.NetNS.Value)
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", s.NetNS.Value)
+ }
+ options = append(options, libpod.WithNetNSFrom(connectedCtr))
+ case !s.NetNS.IsHost() && s.NetNS.NSMode != NoNetwork:
+ postConfigureNetNS := !s.UserNS.IsHost()
+ options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(s.NetNS.NSMode), s.CNINetworks))
+ }
+
+ if len(s.DNSSearch) > 0 {
+ options = append(options, libpod.WithDNSSearch(s.DNSSearch))
+ }
+ if len(s.DNSServer) > 0 {
+ // TODO I'm not sure how we are going to handle this given the input
+ if len(s.DNSServer) == 1 { //&& strings.ToLower(s.DNSServer[0].) == "none" {
+ options = append(options, libpod.WithUseImageResolvConf())
+ } else {
+ var dnsServers []string
+ for _, d := range s.DNSServer {
+ dnsServers = append(dnsServers, d.String())
+ }
+ options = append(options, libpod.WithDNS(dnsServers))
+ }
+ }
+ if len(s.DNSOption) > 0 {
+ options = append(options, libpod.WithDNSOption(s.DNSOption))
+ }
+ if s.StaticIP != nil {
+ options = append(options, libpod.WithStaticIP(*s.StaticIP))
+ }
+
+ if s.StaticMAC != nil {
+ options = append(options, libpod.WithStaticMAC(*s.StaticMAC))
+ }
+ return options, nil
+}
+
+func (s *SpecGenerator) pidConfigureGenerator(g *generate.Generator) error {
+ if s.PidNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), s.PidNS.Value)
+ }
+ if s.PidNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
+ }
+ if s.PidNS.IsContainer() {
+ logrus.Debugf("using container %s pidmode", s.PidNS.Value)
+ }
+ if s.PidNS.IsPod() {
+ logrus.Debug("using pod pidmode")
+ }
+ return nil
+}
+
+func (s *SpecGenerator) utsConfigureGenerator(g *generate.Generator, runtime *libpod.Runtime) error {
+ hostname := s.Hostname
+ var err error
+ if hostname == "" {
+ switch {
+ case s.UtsNS.IsContainer():
+ utsCtr, err := runtime.GetContainer(s.UtsNS.Value)
+ if err != nil {
+ return errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", s.UtsNS.Value)
+ }
+ hostname = utsCtr.Hostname()
+ case s.NetNS.IsHost() || s.UtsNS.IsHost():
+ hostname, err = os.Hostname()
+ if err != nil {
+ return errors.Wrap(err, "unable to retrieve hostname of the host")
+ }
+ default:
+ logrus.Debug("No hostname set; container's hostname will default to runtime default")
+ }
+ }
+ g.RemoveHostname()
+ if s.Hostname != "" || !s.UtsNS.IsHost() {
+ // Set the hostname in the OCI configuration only
+ // if specified by the user or if we are creating
+ // a new UTS namespace.
+ g.SetHostname(hostname)
+ }
+ g.AddProcessEnv("HOSTNAME", hostname)
+
+ if s.UtsNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), s.UtsNS.Value)
+ }
+ if s.UtsNS.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
+ }
+ if s.UtsNS.IsContainer() {
+ logrus.Debugf("using container %s utsmode", s.UtsNS.Value)
+ }
+ return nil
+}
+
+func (s *SpecGenerator) ipcConfigureGenerator(g *generate.Generator) error {
+ if s.IpcNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), s.IpcNS.Value)
+ }
+ if s.IpcNS.IsHost() {
+ return g.RemoveLinuxNamespace(s.IpcNS.Value)
+ }
+ if s.IpcNS.IsContainer() {
+ logrus.Debugf("Using container %s ipcmode", s.IpcNS.Value)
+ }
+ return nil
+}
+
+func (s *SpecGenerator) cgroupConfigureGenerator(g *generate.Generator) error {
+ if s.CgroupNS.IsPath() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), s.CgroupNS.Value)
+ }
+ if s.CgroupNS.IsHost() {
+ return g.RemoveLinuxNamespace(s.CgroupNS.Value)
+ }
+ if s.CgroupNS.IsPrivate() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
+ }
+ if s.CgroupNS.IsContainer() {
+ logrus.Debugf("Using container %s cgroup mode", s.CgroupNS.Value)
+ }
+ return nil
+}
+
+func (s *SpecGenerator) networkConfigureGenerator(g *generate.Generator) error {
+ switch {
+ case s.NetNS.IsHost():
+ logrus.Debug("Using host netmode")
+ if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
+ return err
+ }
+
+ case s.NetNS.NSMode == NoNetwork:
+ logrus.Debug("Using none netmode")
+ case s.NetNS.NSMode == Bridge:
+ logrus.Debug("Using bridge netmode")
+ case s.NetNS.IsContainer():
+ logrus.Debugf("using container %s netmode", s.NetNS.Value)
+ case s.NetNS.IsPath():
+ logrus.Debug("Using ns netmode")
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), s.NetNS.Value); err != nil {
+ return err
+ }
+ case s.NetNS.IsPod():
+ logrus.Debug("Using pod netmode, unless pod is not sharing")
+ case s.NetNS.NSMode == Slirp:
+ logrus.Debug("Using slirp4netns netmode")
+ default:
+ return errors.Errorf("unknown network mode")
+ }
+
+ if g.Config.Annotations == nil {
+ g.Config.Annotations = make(map[string]string)
+ }
+
+ if s.PublishImagePorts {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
+ } else {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
+ }
+
+ return nil
+}
+
+func (s *SpecGenerator) userConfigureGenerator(g *generate.Generator) error {
+ if s.UserNS.IsPath() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), s.UserNS.Value); err != nil {
+ return err
+ }
+ // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
+ g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
+ g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
+ }
+
+ if s.IDMappings != nil {
+ if (len(s.IDMappings.UIDMap) > 0 || len(s.IDMappings.GIDMap) > 0) && !s.UserNS.IsHost() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
+ return err
+ }
+ }
+ for _, uidmap := range s.IDMappings.UIDMap {
+ g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
+ }
+ for _, gidmap := range s.IDMappings.GIDMap {
+ g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
+ }
+ }
+ return nil
+}
+
+func (s *SpecGenerator) securityConfigureGenerator(g *generate.Generator, newImage *image.Image) error {
+ // HANDLE CAPABILITIES
+ // NOTE: Must happen before SECCOMP
+ if s.Privileged {
+ g.SetupPrivileged(true)
+ }
+
+ useNotRoot := func(user string) bool {
+ if user == "" || user == "root" || user == "0" {
+ return false
+ }
+ return true
+ }
+ configSpec := g.Config
+ var err error
+ var caplist []string
+ bounding := configSpec.Process.Capabilities.Bounding
+ if useNotRoot(s.User) {
+ configSpec.Process.Capabilities.Bounding = caplist
+ }
+ caplist, err = capabilities.MergeCapabilities(configSpec.Process.Capabilities.Bounding, s.CapAdd, s.CapDrop)
+ if err != nil {
+ return err
+ }
+
+ configSpec.Process.Capabilities.Bounding = caplist
+ configSpec.Process.Capabilities.Permitted = caplist
+ configSpec.Process.Capabilities.Inheritable = caplist
+ configSpec.Process.Capabilities.Effective = caplist
+ configSpec.Process.Capabilities.Ambient = caplist
+ if useNotRoot(s.User) {
+ caplist, err = capabilities.MergeCapabilities(bounding, s.CapAdd, s.CapDrop)
+ if err != nil {
+ return err
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = caplist
+
+ // HANDLE SECCOMP
+ if s.SeccompProfilePath != "unconfined" {
+ seccompConfig, err := s.getSeccompConfig(configSpec, newImage)
+ if err != nil {
+ return err
+ }
+ configSpec.Linux.Seccomp = seccompConfig
+ }
+
+ // Clear default Seccomp profile from Generator for privileged containers
+ if s.SeccompProfilePath == "unconfined" || s.Privileged {
+ configSpec.Linux.Seccomp = nil
+ }
+
+ g.SetRootReadonly(s.ReadOnlyFilesystem)
+ for sysctlKey, sysctlVal := range s.Sysctl {
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
+ return nil
+}
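
The validate rules above reduce to: Path and FromContainer modes must carry a value, every other mode must not. Two concrete instances (a sketch using only the exported pieces; validate itself is unexported):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/containers/libpod/pkg/specgen"
    )

    func main() {
        // Valid: container mode names the container whose namespace is joined.
        nsCtr := specgen.Namespace{NSMode: specgen.FromContainer, Value: "web"}
        // Valid: host mode must leave Value empty; setting it would fail validation.
        nsHost := specgen.Namespace{NSMode: specgen.Host}

        for _, ns := range []specgen.Namespace{nsCtr, nsHost} {
            b, _ := json.Marshal(ns)
            fmt.Println(string(b), "container-based:", ns.IsContainer())
        }
    }
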
diff --git a/pkg/specgen/oci.go b/pkg/specgen/oci.go
new file mode 100644
index 000000000..2523f21b3
--- /dev/null
+++ b/pkg/specgen/oci.go
@@ -0,0 +1,260 @@
+package specgen
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/rootless"
+ createconfig "github.com/containers/libpod/pkg/spec"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+)
+
+func (s *SpecGenerator) toOCISpec(rt *libpod.Runtime, newImage *image.Image) (*spec.Spec, error) {
+ var (
+ inUserNS bool
+ )
+ cgroupPerm := "ro"
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ // Remove the default /dev/shm mount to ensure we overwrite it
+ g.RemoveMount("/dev/shm")
+ g.HostSpecific = true
+ addCgroup := true
+ canMountSys := true
+
+ isRootless := rootless.IsRootless()
+ if isRootless {
+ inUserNS = true
+ }
+ if !s.UserNS.IsHost() {
+ if s.UserNS.IsContainer() || s.UserNS.IsPath() {
+ inUserNS = true
+ }
+ if s.UserNS.IsPrivate() {
+ inUserNS = true
+ }
+ }
+ if inUserNS && s.NetNS.IsHost() {
+ canMountSys = false
+ }
+
+ if s.Privileged && canMountSys {
+ cgroupPerm = "rw"
+ g.RemoveMount("/sys")
+ sysMnt := spec.Mount{
+ Destination: "/sys",
+ Type: "sysfs",
+ Source: "sysfs",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", "rw"},
+ }
+ g.AddMount(sysMnt)
+ } else if !canMountSys {
+ addCgroup = false
+ g.RemoveMount("/sys")
+ r := "ro"
+ if s.Privileged {
+ r = "rw"
+ }
+ sysMnt := spec.Mount{
+ Destination: "/sys",
+ Type: "bind", // should we use a constant for this, like createconfig?
+ Source: "/sys",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
+ }
+ g.AddMount(sysMnt)
+ if !s.Privileged && isRootless {
+ g.AddLinuxMaskedPaths("/sys/kernel")
+ }
+ }
+ gid5Available := true
+ if isRootless {
+ nGids, err := createconfig.GetAvailableGids()
+ if err != nil {
+ return nil, err
+ }
+ gid5Available = nGids >= 5
+ }
+ // When using a different user namespace, check that the GID 5 is mapped inside
+ // the container.
+ if gid5Available && (s.IDMappings != nil && len(s.IDMappings.GIDMap) > 0) {
+ mappingFound := false
+ for _, r := range s.IDMappings.GIDMap {
+ if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
+ mappingFound = true
+ break
+ }
+ }
+ if !mappingFound {
+ gid5Available = false
+ }
+
+ }
+ if !gid5Available {
+ // If we have no GID mappings, the gid=5 default option would fail, so drop it.
+ g.RemoveMount("/dev/pts")
+ devPts := spec.Mount{
+ Destination: "/dev/pts",
+ Type: "devpts",
+ Source: "devpts",
+ Options: []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
+ }
+ g.AddMount(devPts)
+ }
+
+ if inUserNS && s.IpcNS.IsHost() {
+ g.RemoveMount("/dev/mqueue")
+ devMqueue := spec.Mount{
+ Destination: "/dev/mqueue",
+ Type: "bind", // constant ?
+ Source: "/dev/mqueue",
+ Options: []string{"bind", "nosuid", "noexec", "nodev"},
+ }
+ g.AddMount(devMqueue)
+ }
+ if inUserNS && s.PidNS.IsHost() {
+ g.RemoveMount("/proc")
+ procMount := spec.Mount{
+ Destination: "/proc",
+ Type: createconfig.TypeBind,
+ Source: "/proc",
+ Options: []string{"rbind", "nosuid", "noexec", "nodev"},
+ }
+ g.AddMount(procMount)
+ }
+
+ if addCgroup {
+ cgroupMnt := spec.Mount{
+ Destination: "/sys/fs/cgroup",
+ Type: "cgroup",
+ Source: "cgroup",
+ Options: []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", cgroupPerm},
+ }
+ g.AddMount(cgroupMnt)
+ }
+ g.SetProcessCwd(s.WorkDir)
+ g.SetProcessArgs(s.Command)
+ g.SetProcessTerminal(s.Terminal)
+
+ for key, val := range s.Annotations {
+ g.AddAnnotation(key, val)
+ }
+ g.AddProcessEnv("container", "podman")
+
+ g.Config.Linux.Resources = s.ResourceLimits
+
+ // Devices
+ if s.Privileged {
+ // If privileged, we need to add all the host devices to the
+ // spec. We do not add the user provided ones because we are
+ // already adding them all.
+ if err := createconfig.AddPrivilegedDevices(&g); err != nil {
+ return nil, err
+ }
+ } else {
+ for _, device := range s.Devices {
+ if err := createconfig.DevicesFromPath(&g, device.Path); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // SECURITY OPTS
+ g.SetProcessNoNewPrivileges(s.NoNewPrivileges)
+
+ if !s.Privileged {
+ g.SetProcessApparmorProfile(s.ApparmorProfile)
+ }
+
+ createconfig.BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), &g)
+
+ for name, val := range s.Env {
+ g.AddProcessEnv(name, val)
+ }
+
+ // TODO rlimits and ulimits needs further refinement by someone more
+ // familiar with the code.
+ //if err := addRlimits(config, &g); err != nil {
+ // return nil, err
+ //}
+
+ // NAMESPACES
+
+ if err := s.pidConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.userConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.networkConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.utsConfigureGenerator(&g, rt); err != nil {
+ return nil, err
+ }
+
+ if err := s.ipcConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+
+ if err := s.cgroupConfigureGenerator(&g); err != nil {
+ return nil, err
+ }
+ configSpec := g.Config
+
+ if err := s.securityConfigureGenerator(&g, newImage); err != nil {
+ return nil, err
+ }
+
+ // BIND MOUNTS
+ configSpec.Mounts = createconfig.SupercedeUserMounts(s.Mounts, configSpec.Mounts)
+ // Process mounts to ensure correct options
+ finalMounts, err := createconfig.InitFSMounts(configSpec.Mounts)
+ if err != nil {
+ return nil, err
+ }
+ configSpec.Mounts = finalMounts
+
+ // Add annotations
+ if configSpec.Annotations == nil {
+ configSpec.Annotations = make(map[string]string)
+ }
+
+ // TODO cidfile is not in specgen; when wiring up cli, we will need to move this out of here
+ // leaving as a reminder
+ //if config.CidFile != "" {
+ // configSpec.Annotations[libpod.InspectAnnotationCIDFile] = config.CidFile
+ //}
+
+ if s.Remove {
+ configSpec.Annotations[libpod.InspectAnnotationAutoremove] = libpod.InspectResponseTrue
+ } else {
+ configSpec.Annotations[libpod.InspectAnnotationAutoremove] = libpod.InspectResponseFalse
+ }
+
+ if len(s.VolumesFrom) > 0 {
+ configSpec.Annotations[libpod.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",")
+ }
+
+ if s.Privileged {
+ configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseTrue
+ } else {
+ configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseFalse
+ }
+
+ // TODO Init might not make it into the specgen and therefore is not available here. We should deal
+ // with this when we wire up the CLI; leaving as a reminder
+ //if s.Init {
+ // configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseTrue
+ //} else {
+ // configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseFalse
+ //}
+
+ return configSpec, nil
+}
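
A stand-alone sketch (not from this patch; idtools.IDMap-style ContainerID/Size fields are assumed) of the GID 5 mapping check above, which decides whether the devpts mount may keep its default gid=5 option:

package main

import "fmt"

type idMap struct{ ContainerID, HostID, Size int }

// gid5Mapped reports whether GID 5 falls inside any of the given mappings,
// mirroring the loop over s.IDMappings.GIDMap in toOCISpec.
func gid5Mapped(gidMap []idMap) bool {
	for _, r := range gidMap {
		if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(gid5Mapped([]idMap{{ContainerID: 0, HostID: 100000, Size: 65536}})) // true
	fmt.Println(gid5Mapped([]idMap{{ContainerID: 0, HostID: 100000, Size: 4}}))     // false: only GIDs 0-3 are mapped
}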
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index e22ee598f..e1dfe4dc5 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -2,24 +2,28 @@ package specgen
import (
"net"
+ "syscall"
"github.com/containers/image/v5/manifest"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
)
-// TODO
-// mheon provided this an off the cuff suggestion. Adding it here to retain
-// for history as we implement it. When this struct is implemented, we need
-// to remove the nolints.
-type Namespace struct {
- isHost bool //nolint
- isPath string //nolint
- isContainer string //nolint
- isPod bool //nolint
+// LogConfig describes the logging characteristics for a container
+type LogConfig struct {
+ // LogDriver is the container's log driver.
+ // Optional.
+ Driver string `json:"driver,omitempty"`
+ // LogPath is the path the container's logs will be stored at.
+ // Only available if LogDriver is set to "json-file" or "k8s-file".
+ // Optional.
+ Path string `json:"path,omitempty"`
+ // A set of options to accompany the log driver.
+ // Optional.
+ Options map[string]string `json:"options,omitempty"`
}
// ContainerBasicConfig contains the basic parts of a container.
@@ -62,7 +66,7 @@ type ContainerBasicConfig struct {
// If not provided, the default, SIGTERM, will be used.
// Will conflict with Systemd if Systemd is set to "true" or "always".
// Optional.
- StopSignal *uint `json:"stop_signal,omitempty"`
+ StopSignal *syscall.Signal `json:"stop_signal,omitempty"`
// StopTimeout is a timeout between the container's stop signal being
// sent and SIGKILL being sent.
// If not provided, the default will be used.
@@ -70,13 +74,10 @@ type ContainerBasicConfig struct {
// instead.
// Optional.
StopTimeout *uint `json:"stop_timeout,omitempty"`
- // LogDriver is the container's log driver.
- // Optional.
- LogDriver string `json:"log_driver,omitempty"`
- // LogPath is the path the container's logs will be stored at.
- // Only available if LogDriver is set to "json-file" or "k8s-file".
- // Optional.
- LogPath string `json:"log_path,omitempty"`
+ // LogConfiguration describes the logging for a container including
+ // driver, path, and options.
+ // Optional
+ LogConfiguration *LogConfig `json:"log_configuration,omitempty"`
// ConmonPidFile is a path at which a PID file for Conmon will be
// placed.
// If not given, a default location will be used.
@@ -111,12 +112,10 @@ type ContainerBasicConfig struct {
// Namespace is the libpod namespace the container will be placed in.
// Optional.
Namespace string `json:"namespace,omitempty"`
-
// PidNS is the container's PID namespace.
// It defaults to private.
// Mandatory.
PidNS Namespace `json:"pidns,omitempty"`
-
// UtsNS is the container's UTS namespace.
// It defaults to private.
// Must be set to Private to set Hostname.
@@ -128,6 +127,11 @@ type ContainerBasicConfig struct {
// Conflicts with UtsNS if UtsNS is not set to private.
// Optional.
Hostname string `json:"hostname,omitempty"`
+ // Sysctl sets kernel parameters for the container
+ Sysctl map[string]string `json:"sysctl,omitempty"`
+ // Remove indicates if the container should be removed once it has been started
+ // and exits
+ Remove bool `json:"remove"`
}
// ContainerStorageConfig contains information on the storage configuration of a
@@ -175,7 +179,7 @@ type ContainerStorageConfig struct {
// Mandatory.
IpcNS Namespace `json:"ipcns,omitempty"`
// ShmSize is the size of the tmpfs to mount in at /dev/shm, in bytes.
- // Conflicts with ShmSize if ShmSize is not private.
+ // Conflicts with ShmSize if IpcNS is not private.
// Optional.
ShmSize *int64 `json:"shm_size,omitempty"`
// WorkDir is the container's working directory.
@@ -234,6 +238,9 @@ type ContainerSecurityConfig struct {
// will use.
// Optional.
ApparmorProfile string `json:"apparmor_profile,omitempty"`
+	// SeccompPolicy determines which seccomp profile gets applied
+	// to the container. Valid values: empty, default, image.
+ SeccompPolicy string `json:"seccomp_policy,omitempty"`
// SeccompProfilePath is the path to a JSON file containing the
// container's Seccomp profile.
// If not specified, no Seccomp profile will be used.
@@ -252,7 +259,10 @@ type ContainerSecurityConfig struct {
// IDMappings are UID and GID mappings that will be used by user
// namespaces.
// Required if UserNS is private.
- IDMappings storage.IDMappingOptions `json:"idmappings,omitempty"`
+ IDMappings *storage.IDMappingOptions `json:"idmappings,omitempty"`
+	// ReadOnlyFilesystem indicates that everything will be mounted
+	// as read-only.
+	ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"`
}
// ContainerCgroupConfig contains configuration information about a container's
@@ -260,16 +270,13 @@ type ContainerSecurityConfig struct {
type ContainerCgroupConfig struct {
// CgroupNS is the container's cgroup namespace.
// It defaults to private.
- // Conflicts with NoCgroups if not set to host.
// Mandatory.
CgroupNS Namespace `json:"cgroupns,omitempty"`
- // NoCgroups indicates that the container should not create CGroups.
- // Conflicts with CgroupParent and CgroupNS if CgroupNS is not set to
- // host.
- NoCgroups bool `json:"no_cgroups,omitempty"`
+ // CgroupsMode sets a policy for how cgroups will be created in the
+ // container, including the ability to disable creation entirely.
+ CgroupsMode string `json:"cgroups_mode,omitempty"`
// CgroupParent is the container's CGroup parent.
// If not set, the default for the current cgroup driver will be used.
- // Conflicts with NoCgroups.
// Optional.
CgroupParent string `json:"cgroup_parent,omitempty"`
}
@@ -348,7 +355,7 @@ type ContainerNetworkConfig struct {
// ContainerResourceConfig contains information on container resource limits.
type ContainerResourceConfig struct {
- // ResourceLimits are resource limits to apply to the container.
+ // ResourceLimits are resource limits to apply to the container.,
// Can only be set as root on cgroups v1 systems, but can be set as
// rootless as well for cgroups v2.
// Optional.
@@ -365,11 +372,12 @@ type ContainerResourceConfig struct {
// ContainerHealthCheckConfig describes a container healthcheck with attributes
// like command, retries, interval, start period, and timeout.
type ContainerHealthCheckConfig struct {
- HealthConfig manifest.Schema2HealthConfig `json:"healthconfig,omitempty"`
+ HealthConfig *manifest.Schema2HealthConfig `json:"healthconfig,omitempty"`
}
// SpecGenerator creates an OCI spec and Libpod configuration options to create
// a container based on the given configuration.
+// swagger:model SpecGenerator
type SpecGenerator struct {
ContainerBasicConfig
ContainerStorageConfig
@@ -381,19 +389,24 @@ type SpecGenerator struct {
}
// NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs
-func NewSpecGenerator(image, rootfs *string) (*SpecGenerator, error) {
- _ = image
- _ = rootfs
- return &SpecGenerator{}, define.ErrNotImplemented
-}
-
-// Validate verifies that the given SpecGenerator is valid and satisfies required
-// input for creating a container.
-func (s *SpecGenerator) Validate() error {
- return define.ErrNotImplemented
+func NewSpecGenerator(image string) *SpecGenerator {
+ net := ContainerNetworkConfig{
+ NetNS: Namespace{
+ NSMode: Bridge,
+ },
+ }
+ csc := ContainerStorageConfig{Image: image}
+ if rootless.IsRootless() {
+ net.NetNS.NSMode = Slirp
+ }
+ return &SpecGenerator{
+ ContainerStorageConfig: csc,
+ ContainerNetworkConfig: net,
+ }
}
-// MakeContainer creates a container based on the SpecGenerator
-func (s *SpecGenerator) MakeContainer() (*libpod.Container, error) {
- return nil, define.ErrNotImplemented
+// NewSpecGeneratorWithRootfs returns a SpecGenerator struct given a root filesystem path
+func NewSpecGeneratorWithRootfs(rootfs string) *SpecGenerator {
+ csc := ContainerStorageConfig{Rootfs: rootfs}
+ return &SpecGenerator{ContainerStorageConfig: csc}
}
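
For context (not part of the patch), a sketch of how a caller might use the new constructors; the field names come from the config structs in this file, and the image reference is only an example:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/specgen"
)

func main() {
	// Image-backed container; NetNS defaults to bridge, or slirp when rootless.
	s := specgen.NewSpecGenerator("docker.io/library/alpine:latest")
	s.Command = []string{"ls", "-l"}
	s.Terminal = true
	s.Remove = true

	// Rootfs-backed container, the other mandatory-input variant.
	r := specgen.NewSpecGeneratorWithRootfs("/var/lib/containers/rootfs/alpine")

	fmt.Println(s.Image, s.NetNS.NSMode, r.Rootfs)
}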
diff --git a/pkg/specgen/validate.go b/pkg/specgen/validate.go
new file mode 100644
index 000000000..78e4d8ad5
--- /dev/null
+++ b/pkg/specgen/validate.go
@@ -0,0 +1,159 @@
+package specgen
+
+import (
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/pkg/errors"
+)
+
+var (
+ // ErrInvalidSpecConfig describes an error that the given SpecGenerator is invalid
+ ErrInvalidSpecConfig error = errors.New("invalid configuration")
+ // SystemDValues describes the only values that SystemD can be
+ SystemDValues = []string{"true", "false", "always"}
+ // ImageVolumeModeValues describes the only values that ImageVolumeMode can be
+ ImageVolumeModeValues = []string{"ignore", "tmpfs", "anonymous"}
+)
+
+func exclusiveOptions(opt1, opt2 string) error {
+ return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2)
+}
+
+// Validate verifies that the given SpecGenerator is valid and satisfies required
+// input for creating a container.
+func (s *SpecGenerator) validate(rt *libpod.Runtime) error {
+
+ //
+ // ContainerBasicConfig
+ //
+	// Rootfs and Image cannot both be populated
+ if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+ return errors.Wrap(ErrInvalidSpecConfig, "both image and rootfs cannot be simultaneously")
+ }
+	// Hostname can only be set when a private UTS namespace is used
+ if len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() {
+ return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when creating an UTS namespace")
+ }
+ // systemd values must be true, false, or always
+ if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) {
+ return errors.Wrapf(ErrInvalidSpecConfig, "SystemD values must be one of %s", strings.Join(SystemDValues, ","))
+ }
+
+ //
+ // ContainerStorageConfig
+ //
+ // rootfs and image cannot both be set
+ if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+ return exclusiveOptions("rootfs", "image")
+ }
+ // imagevolumemode must be one of ignore, tmpfs, or anonymous if given
+ if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) {
+ return errors.Errorf("ImageVolumeMode values must be one of %s", strings.Join(ImageVolumeModeValues, ","))
+ }
+ // shmsize conflicts with IPC namespace
+ if s.ContainerStorageConfig.ShmSize != nil && !s.ContainerStorageConfig.IpcNS.IsPrivate() {
+ return errors.New("cannot set shmsize when creating an IPC namespace")
+ }
+
+ //
+ // ContainerSecurityConfig
+ //
+ // groups and privileged are exclusive
+ if len(s.Groups) > 0 && s.Privileged {
+ return exclusiveOptions("Groups", "privileged")
+ }
+ // capadd and privileged are exclusive
+ if len(s.CapAdd) > 0 && s.Privileged {
+ return exclusiveOptions("CapAdd", "privileged")
+ }
+ // selinuxprocesslabel and privileged are exclusive
+ if len(s.SelinuxProcessLabel) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxProcessLabel", "privileged")
+ }
+	// selinuxmountlabel and privileged are exclusive
+ if len(s.SelinuxMountLabel) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxMountLabel", "privileged")
+ }
+ // selinuxopts and privileged are exclusive
+ if len(s.SelinuxOpts) > 0 && s.Privileged {
+ return exclusiveOptions("SelinuxOpts", "privileged")
+ }
+ // apparmor and privileged are exclusive
+ if len(s.ApparmorProfile) > 0 && s.Privileged {
+ return exclusiveOptions("AppArmorProfile", "privileged")
+ }
+	// a private user namespace requires ID mappings
+ if s.UserNS.IsPrivate() && s.IDMappings == nil {
+ return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when not creating a User namespace")
+ }
+
+ //
+ // ContainerCgroupConfig
+ //
+ //
+ // None for now
+
+ //
+ // ContainerNetworkConfig
+ //
+ if !s.NetNS.IsPrivate() && s.ConfigureNetNS {
+ return errors.New("can only configure network namespace when creating a network a network namespace")
+ }
+ // useimageresolveconf conflicts with dnsserver, dnssearch, dnsoption
+ if s.UseImageResolvConf {
+ if len(s.DNSServer) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSServer")
+ }
+ if len(s.DNSSearch) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSSearch")
+ }
+ if len(s.DNSOption) > 0 {
+ return exclusiveOptions("UseImageResolvConf", "DNSOption")
+ }
+ }
+ // UseImageHosts and HostAdd are exclusive
+ if s.UseImageHosts && len(s.HostAdd) > 0 {
+ return exclusiveOptions("UseImageHosts", "HostAdd")
+ }
+
+ // TODO the specgen does not appear to handle this? Should it
+ //switch config.Cgroup.Cgroups {
+ //case "disabled":
+ // if addedResources {
+ // return errors.New("cannot specify resource limits when cgroups are disabled is specified")
+ // }
+ // configSpec.Linux.Resources = &spec.LinuxResources{}
+ //case "enabled", "no-conmon", "":
+ // // Do nothing
+ //default:
+ // return errors.New("unrecognized option for cgroups; supported are 'default', 'disabled', 'no-conmon'")
+ //}
+
+ // Namespaces
+ if err := s.UtsNS.validate(); err != nil {
+ return err
+ }
+ if err := s.IpcNS.validate(); err != nil {
+ return err
+ }
+ if err := s.NetNS.validate(); err != nil {
+ return err
+ }
+ if err := s.PidNS.validate(); err != nil {
+ return err
+ }
+ if err := s.CgroupNS.validate(); err != nil {
+ return err
+ }
+ if err := s.UserNS.validate(); err != nil {
+ return err
+ }
+
+ // The following are defaults as needed by container creation
+ if len(s.WorkDir) < 1 {
+ s.WorkDir = "/"
+ }
+ return nil
+}
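
A trivial stand-alone sketch (not from this patch) of the mutual-exclusion pattern validate uses: every conflicting pair is routed through one helper so the error text stays uniform.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// exclusiveOptions mirrors the helper above: both options were supplied,
// which the validator treats as a user error.
func exclusiveOptions(opt1, opt2 string) error {
	return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2)
}

type storageConfig struct {
	Image  string
	Rootfs string
}

func validateStorage(c storageConfig) error {
	if c.Image != "" && c.Rootfs != "" {
		return exclusiveOptions("image", "rootfs")
	}
	return nil
}

func main() {
	fmt.Println(validateStorage(storageConfig{Image: "alpine", Rootfs: "/tmp/rootfs"}))
	// image and rootfs are mutually exclusive options
}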
diff --git a/pkg/systemd/activation.go b/pkg/systemd/activation.go
new file mode 100644
index 000000000..c8b2389dc
--- /dev/null
+++ b/pkg/systemd/activation.go
@@ -0,0 +1,40 @@
+package systemd
+
+import (
+ "os"
+ "strconv"
+ "strings"
+)
+
+// SocketActivated determines whether podman is running under the systemd socket-activation protocol
+func SocketActivated() bool {
+	pid, pidFound := os.LookupEnv("LISTEN_PID")
+	fds, fdsFound := os.LookupEnv("LISTEN_FDS")
+	fdnames, fdnamesFound := os.LookupEnv("LISTEN_FDNAMES")
+
+	if !(pidFound && fdsFound && fdnamesFound) {
+ return false
+ }
+
+ p, err := strconv.Atoi(pid)
+ if err != nil || p != os.Getpid() {
+ return false
+ }
+
+ nfds, err := strconv.Atoi(fds)
+ if err != nil || nfds < 1 {
+ return false
+ }
+
+ // First available file descriptor is always 3.
+ if nfds > 1 {
+ names := strings.Split(fdnames, ":")
+ for _, n := range names {
+ if strings.Contains(n, "podman") {
+ return true
+ }
+ }
+ }
+
+ return true
+}
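
For reference (not part of the patch), SocketActivated reads the three environment variables defined by the systemd socket-activation protocol; a small sketch faking that environment:

package main

import (
	"fmt"
	"os"
	"strconv"

	"github.com/containers/libpod/pkg/systemd"
)

func main() {
	// systemd passes listener fds starting at fd 3 and describes them through
	// these variables; LISTEN_PID must match the current process.
	os.Setenv("LISTEN_PID", strconv.Itoa(os.Getpid()))
	os.Setenv("LISTEN_FDS", "1")
	os.Setenv("LISTEN_FDNAMES", "podman.sock")
	fmt.Println(systemd.SocketActivated()) // true

	os.Setenv("LISTEN_PID", "1") // some other process's pid
	fmt.Println(systemd.SocketActivated()) // false (assuming we are not pid 1)
}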
diff --git a/pkg/systemdgen/systemdgen.go b/pkg/systemd/generate/systemdgen.go
index 26b3b3756..404347828 100644
--- a/pkg/systemdgen/systemdgen.go
+++ b/pkg/systemd/generate/systemdgen.go
@@ -1,4 +1,4 @@
-package systemdgen
+package generate
import (
"bytes"
diff --git a/pkg/systemdgen/systemdgen_test.go b/pkg/systemd/generate/systemdgen_test.go
index ee2429407..b74b75258 100644
--- a/pkg/systemdgen/systemdgen_test.go
+++ b/pkg/systemd/generate/systemdgen_test.go
@@ -1,4 +1,4 @@
-package systemdgen
+package generate
import (
"testing"
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 78c9b52d9..42698d270 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -19,14 +19,15 @@ import (
var _ = Describe("Podman login and logout", func() {
var (
- tempdir string
- err error
- podmanTest *PodmanTestIntegration
- authPath string
- certPath string
- port int
- server string
- testImg string
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ authPath string
+ certPath string
+ port int
+ server string
+ testImg string
+ registriesConfWithSearch []byte
)
BeforeEach(func() {
@@ -64,6 +65,9 @@ var _ = Describe("Podman login and logout", func() {
f.Sync()
port = 4999 + config.GinkgoConfig.ParallelNode
server = strings.Join([]string{"localhost", strconv.Itoa(port)}, ":")
+
+ registriesConfWithSearch = []byte(fmt.Sprintf("[registries.search]\nregistries = ['%s']", server))
+
testImg = strings.Join([]string{server, "test-apline"}, "/")
os.MkdirAll(filepath.Join("/etc/containers/certs.d", server), os.ModePerm)
@@ -113,6 +117,38 @@ var _ = Describe("Podman login and logout", func() {
Expect(session).To(ExitWithError())
})
+ It("podman login and logout without registry parameter", func() {
+ SkipIfRootless()
+
+ registriesConf, err := ioutil.TempFile("", "TestLoginWithoutParameter")
+ Expect(err).To(BeNil())
+ defer registriesConf.Close()
+ defer os.Remove(registriesConf.Name())
+
+ err = ioutil.WriteFile(registriesConf.Name(), []byte(registriesConfWithSearch), os.ModePerm)
+ Expect(err).To(BeNil())
+
+ // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not
+ // run in parallel unless they opt in by calling t.Parallel(). So don’t do that.
+ oldRCP, hasRCP := os.LookupEnv("REGISTRIES_CONFIG_PATH")
+ defer func() {
+ if hasRCP {
+ os.Setenv("REGISTRIES_CONFIG_PATH", oldRCP)
+ } else {
+ os.Unsetenv("REGISTRIES_CONFIG_PATH")
+ }
+ }()
+ os.Setenv("REGISTRIES_CONFIG_PATH", registriesConf.Name())
+
+ session := podmanTest.Podman([]string{"login", "-u", "podmantest", "-p", "test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To((Equal(0)))
+
+ session = podmanTest.Podman([]string{"logout"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
It("podman login and logout with flag --authfile", func() {
SkipIfRootless()
authFile := filepath.Join(podmanTest.TempDir, "auth.json")
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index 48dd186e2..adbb9c16c 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -170,10 +170,11 @@ var _ = Describe("Podman ps", func() {
_, ec, _ := podmanTest.RunLsContainer("test1")
Expect(ec).To(Equal(0))
- result := podmanTest.Podman([]string{"ps", "-a", "--format", "table {{.ID}} {{.Image}} {{.Labels}}"})
+ result := podmanTest.Podman([]string{"ps", "-a", "--format", "table {{.ID}} {{.Image}} {{.ImageID}} {{.Labels}}"})
result.WaitWithDefaultTimeout()
Expect(strings.Contains(result.OutputToStringArray()[0], "table")).To(BeFalse())
Expect(strings.Contains(result.OutputToStringArray()[0], "ID")).To(BeTrue())
+ Expect(strings.Contains(result.OutputToStringArray()[0], "ImageID")).To(BeTrue())
Expect(strings.Contains(result.OutputToStringArray()[1], "alpine:latest")).To(BeTrue())
Expect(result.ExitCode()).To(Equal(0))
})
diff --git a/test/system/150-login.bats b/test/system/150-login.bats
new file mode 100644
index 000000000..9c9593311
--- /dev/null
+++ b/test/system/150-login.bats
@@ -0,0 +1,339 @@
+#!/usr/bin/env bats -*- bats -*-
+#
+# tests for podman login
+#
+
+load helpers
+
+###############################################################################
+# BEGIN one-time envariable setup
+
+# Create a scratch directory; our podman registry will run from here. We
+# also use it for other temporary files like authfiles.
+if [ -z "${PODMAN_LOGIN_WORKDIR}" ]; then
+ export PODMAN_LOGIN_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} podman_bats_login.XXXXXX)
+fi
+
+# Randomly-generated username and password
+if [ -z "${PODMAN_LOGIN_USER}" ]; then
+ export PODMAN_LOGIN_USER="user$(random_string 4)"
+ export PODMAN_LOGIN_PASS=$(random_string 15)
+fi
+
+# Randomly-assigned port in the 5xxx range
+if [ -z "${PODMAN_LOGIN_REGISTRY_PORT}" ]; then
+ for port in $(shuf -i 5000-5999);do
+ if ! { exec 3<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
+ export PODMAN_LOGIN_REGISTRY_PORT=$port
+ break
+ fi
+ done
+fi
+
+# Override any user-set path to an auth file
+unset REGISTRY_AUTH_FILE
+
+# END one-time envariable setup
+###############################################################################
+# BEGIN filtering - none of these tests will work with podman-remote
+
+function setup() {
+ skip_if_remote "none of these tests work with podman-remote"
+
+ basic_setup
+}
+
+# END filtering - none of these tests will work with podman-remote
+###############################################################################
+# BEGIN first "test" - start a registry for use by other tests
+#
+# This isn't really a test: it's a helper that starts a local registry.
+# Note that we're careful to use a root/runroot separate from our tests,
+# so setup/teardown don't clobber our registry image.
+#
+
+@test "podman login [start registry]" {
+ AUTHDIR=${PODMAN_LOGIN_WORKDIR}/auth
+ mkdir -p $AUTHDIR
+
+ # Pull registry image, but into a separate container storage
+ mkdir -p ${PODMAN_LOGIN_WORKDIR}/root
+ mkdir -p ${PODMAN_LOGIN_WORKDIR}/runroot
+ PODMAN_LOGIN_ARGS="--root ${PODMAN_LOGIN_WORKDIR}/root --runroot ${PODMAN_LOGIN_WORKDIR}/runroot"
+ # Give it three tries, to compensate for flakes
+ run_podman ${PODMAN_LOGIN_ARGS} pull registry:2 ||
+ run_podman ${PODMAN_LOGIN_ARGS} pull registry:2 ||
+ run_podman ${PODMAN_LOGIN_ARGS} pull registry:2
+
+ # Registry image needs a cert. Self-signed is good enough.
+ CERT=$AUTHDIR/domain.crt
+ if [ ! -e $CERT ]; then
+ openssl req -newkey rsa:4096 -nodes -sha256 \
+ -keyout $AUTHDIR/domain.key -x509 -days 2 \
+ -out $AUTHDIR/domain.crt \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
+ fi
+
+ # Store credentials where container will see them
+ if [ ! -e $AUTHDIR/htpasswd ]; then
+ run_podman ${PODMAN_LOGIN_ARGS} run --rm \
+ --entrypoint htpasswd registry:2 \
+ -Bbn ${PODMAN_LOGIN_USER} ${PODMAN_LOGIN_PASS} \
+ > $AUTHDIR/htpasswd
+
+ # In case $PODMAN_TEST_KEEP_LOGIN_REGISTRY is set, for testing later
+ echo "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" \
+ > $AUTHDIR/htpasswd-plaintext
+ fi
+
+ # Run the registry container.
+ run_podman '?' ${PODMAN_LOGIN_ARGS} rm -f registry
+ run_podman ${PODMAN_LOGIN_ARGS} run -d \
+ -p ${PODMAN_LOGIN_REGISTRY_PORT}:5000 \
+ --name registry \
+ -v $AUTHDIR:/auth:Z \
+ -e "REGISTRY_AUTH=htpasswd" \
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+ -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
+ -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt \
+ -e REGISTRY_HTTP_TLS_KEY=/auth/domain.key \
+ registry:2
+}
+
+# END first "test" - start a registry for use by other tests
+###############################################################################
+# BEGIN actual tests
+# BEGIN primary podman login/push/pull tests
+
+@test "podman login - basic test" {
+ run_podman login --tls-verify=false \
+ --username ${PODMAN_LOGIN_USER} \
+ --password ${PODMAN_LOGIN_PASS} \
+ localhost:${PODMAN_LOGIN_REGISTRY_PORT}
+ is "$output" "Login Succeeded!" "output from podman login"
+
+ # Now log out
+ run_podman logout localhost:${PODMAN_LOGIN_REGISTRY_PORT}
+ is "$output" "Removed login credentials for localhost:${PODMAN_LOGIN_REGISTRY_PORT}" \
+ "output from podman logout"
+}
+
+@test "podman login - with wrong credentials" {
+ registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
+
+ run_podman 125 login --tls-verify=false \
+ --username ${PODMAN_LOGIN_USER} \
+ --password "x${PODMAN_LOGIN_PASS}" \
+ $registry
+ is "$output" \
+ "Error: error logging into \"$registry\": invalid username/password" \
+ 'output from podman login'
+}
+
+@test "podman login - check generated authfile" {
+ authfile=${PODMAN_LOGIN_WORKDIR}/auth-$(random_string 10).json
+ rm -f $authfile
+
+ registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
+
+ run_podman login --authfile=$authfile \
+ --tls-verify=false \
+ --username ${PODMAN_LOGIN_USER} \
+ --password ${PODMAN_LOGIN_PASS} \
+ $registry
+
+ # Confirm that authfile now exists
+ test -e $authfile || \
+ die "podman login did not create authfile $authfile"
+
+ # Special bracket form needed because of colon in host:port
+ run jq -r ".[\"auths\"][\"$registry\"][\"auth\"]" <$authfile
+ is "$status" "0" "jq from $authfile"
+
+ expect_userpass="${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}"
+ actual_userpass=$(base64 -d <<<"$output")
+ is "$actual_userpass" "$expect_userpass" "credentials stored in $authfile"
+
+
+ # Now log out and make sure credentials are removed
+ run_podman logout --authfile=$authfile $registry
+
+ run jq -r '.auths' <$authfile
+ is "$status" "0" "jq from $authfile"
+ is "$output" "{}" "credentials removed from $authfile"
+}
+
+# Some push tests
+@test "podman push fail" {
+ # Create an invalid authfile
+ authfile=${PODMAN_LOGIN_WORKDIR}/auth-$(random_string 10).json
+ rm -f $authfile
+
+ wrong_auth=$(base64 <<<"baduser:wrongpassword")
+ cat >$authfile <<EOF
+{
+ "auths": {
+ "localhost:${PODMAN_LOGIN_REGISTRY_PORT}": {
+ "auth": "$wrong_auth"
+ }
+ }
+}
+EOF
+
+ run_podman 125 push --authfile=$authfile \
+ --tls-verify=false $IMAGE \
+ localhost:${PODMAN_LOGIN_REGISTRY_PORT}/badpush:1
+ is "$output" ".*: unauthorized: authentication required" \
+ "auth error on push"
+}
+
+@test "podman push ok" {
+ # ARGH! We can't push $IMAGE (alpine_labels) to this registry; error is:
+ #
+ # Writing manifest to image destination
+ # Error: Error copying image to the remote destination: Error writing manifest: Error uploading manifest latest to localhost:${PODMAN_LOGIN_REGISTRY_PORT}/okpush: received unexpected HTTP status: 500 Internal Server Error
+ #
+ # Root cause: something to do with v1/v2 s1/s2:
+ #
+ # https://github.com/containers/skopeo/issues/651
+ #
+ run_podman pull busybox
+
+ # Preserve its ID for later comparison against push/pulled image
+ run_podman inspect --format '{{.Id}}' busybox
+ id_busybox=$output
+
+ destname=ok-$(random_string 10 | tr A-Z a-z)-ok
+ # Use command-line credentials
+ run_podman push --tls-verify=false \
+ --creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \
+ busybox localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname
+
+ # Yay! Pull it back
+ run_podman pull --tls-verify=false \
+ --creds ${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS} \
+ localhost:${PODMAN_LOGIN_REGISTRY_PORT}/$destname
+
+ # Compare to original busybox
+ run_podman inspect --format '{{.Id}}' $destname
+ is "$output" "$id_busybox" "Image ID of pulled image == busybox"
+
+ run_podman rmi busybox $destname
+}
+
+# END primary podman login/push/pull tests
+###############################################################################
+# BEGIN cooperation with skopeo
+
+# Skopeo helper - keep this separate, so we can test with different
+# envariable settings
+function _test_skopeo_credential_sharing() {
+ if ! type -p skopeo; then
+ skip "skopeo not available"
+ fi
+
+ registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
+
+ run_podman login "$@" --tls-verify=false \
+ --username ${PODMAN_LOGIN_USER} \
+ --password ${PODMAN_LOGIN_PASS} \
+ $registry
+
+ destname=skopeo-ok-$(random_string 10 | tr A-Z a-z)-ok
+ echo "# skopeo copy ..."
+ run skopeo copy "$@" \
+ --format=v2s2 \
+ --dest-tls-verify=false \
+ containers-storage:$IMAGE \
+ docker://$registry/$destname
+ echo "$output"
+ is "$status" "0" "skopeo copy - exit status"
+ is "$output" ".*Copying blob .*" "output of skopeo copy"
+ is "$output" ".*Copying config .*" "output of skopeo copy"
+ is "$output" ".*Writing manifest .*" "output of skopeo copy"
+
+ echo "# skopeo inspect ..."
+ run skopeo inspect "$@" --tls-verify=false docker://$registry/$destname
+ echo "$output"
+ is "$status" "0" "skopeo inspect - exit status"
+
+ got_name=$(jq -r .Name <<<"$output")
+ is "$got_name" "$registry/$dest_name" "skopeo inspect -> Name"
+
+ # Now try without a valid login; it should fail
+ run_podman logout "$@" $registry
+ echo "# skopeo inspect [with no credentials] ..."
+ run skopeo inspect "$@" --tls-verify=false docker://$registry/$destname
+ echo "$output"
+ is "$status" "1" "skopeo inspect - exit status"
+ is "$output" ".*: unauthorized: authentication required" \
+ "auth error on skopeo inspect"
+}
+
+@test "podman login - shares credentials with skopeo - default auth file" {
+ if is_rootless; then
+ if [ -z "${XDG_RUNTIME_DIR}" ]; then
+ skip "skopeo does not match podman when XDG_RUNTIME_DIR unset; #823"
+ fi
+ fi
+ _test_skopeo_credential_sharing
+}
+
+@test "podman login - shares credentials with skopeo - via envariable" {
+ skip "skopeo does not yet support REGISTRY_AUTH_FILE; #822"
+ authfile=${PODMAN_LOGIN_WORKDIR}/auth-$(random_string 10).json
+ rm -f $authfile
+
+ REGISTRY_AUTH_FILE=$authfile _test_skopeo_credential_sharing
+ rm -f $authfile
+}
+
+@test "podman login - shares credentials with skopeo - via --authfile" {
+ # Also test that command-line --authfile overrides envariable
+ authfile=${PODMAN_LOGIN_WORKDIR}/auth-$(random_string 10).json
+ rm -f $authfile
+
+ fake_authfile=${PODMAN_LOGIN_WORKDIR}/auth-$(random_string 10).json
+ rm -f $fake_authfile
+
+ REGISTRY_AUTH_FILE=$authfile _test_skopeo_credential_sharing --authfile=$authfile
+
+ if [ -e $fake_authfile ]; then
+ die "REGISTRY_AUTH_FILE overrode command-line --authfile!"
+ fi
+ rm -f $authfile
+}
+
+# END cooperation with skopeo
+# END actual tests
+###############################################################################
+# BEGIN teardown (remove the registry container)
+
+@test "podman login [stop registry, clean up]" {
+ # For manual debugging; user may request keeping the registry running
+ if [ -n "${PODMAN_TEST_KEEP_LOGIN_REGISTRY}" ]; then
+ skip "[leaving registry running by request]"
+ fi
+
+ run_podman --root ${PODMAN_LOGIN_WORKDIR}/root \
+ --runroot ${PODMAN_LOGIN_WORKDIR}/runroot \
+ rm -f registry
+ run_podman --root ${PODMAN_LOGIN_WORKDIR}/root \
+ --runroot ${PODMAN_LOGIN_WORKDIR}/runroot \
+ rmi -a
+
+ # By default, clean up
+ if [ -z "${PODMAN_TEST_KEEP_LOGIN_WORKDIR}" ]; then
+ rm -rf ${PODMAN_LOGIN_WORKDIR}
+ fi
+
+ # Make sure socket is closed
+ if { exec 3<> /dev/tcp/127.0.0.1/${PODMAN_LOGIN_REGISTRY_PORT}; } &>/dev/null; then
+ die "Socket still seems open"
+ fi
+}
+
+# END teardown (remove the registry container)
+###############################################################################
+
+# vim: filetype=sh
diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..be0791620
--- /dev/null
+++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The Containers Storage Project Community Code of Conduct
+
+The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 1b69d6060..09937303b 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -54,19 +54,19 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.
containers-storage: $(sources) ## build using gc on the host
$(GO_BUILD) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
-layers_ffjson.go: layers.go
+layers_ffjson.go: $(FFJSON) layers.go
$(RM) $@
$(FFJSON) layers.go
-images_ffjson.go: images.go
+images_ffjson.go: $(FFJSON) images.go
$(RM) $@
$(FFJSON) images.go
-containers_ffjson.go: containers.go
+containers_ffjson.go: $(FFJSON) containers.go
$(RM) $@
$(FFJSON) containers.go
-pkg/archive/archive_ffjson.go: pkg/archive/archive.go
+pkg/archive/archive_ffjson.go: $(FFJSON) pkg/archive/archive.go
$(RM) $@
$(FFJSON) pkg/archive/archive.go
@@ -118,6 +118,9 @@ validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor u
install.tools:
make -C tests/tools
+$(FFJSON):
+ make -C tests/tools build/ffjson
+
install.docs: docs
make -C docs install
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 98e863cdf..15b989e39 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.15.8
+1.16.0
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index ba40f9c14..84dd86a20 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -7,10 +7,10 @@ require (
github.com/Microsoft/hcsshim v0.8.7
github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
github.com/docker/go-units v0.4.0
- github.com/klauspost/compress v1.9.8
+ github.com/klauspost/compress v1.10.0
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/pgzip v1.2.1
- github.com/mattn/go-shellwords v1.0.9
+ github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/runc v1.0.0-rc9
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index e2785594d..c2029949a 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -79,6 +79,8 @@ github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug2
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
+github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -93,6 +95,8 @@ github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWeP
github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
+github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 0dde97c18..e1954ad04 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
package storage
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
new file mode 100644
index 000000000..3a1095226
--- /dev/null
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -0,0 +1,2156 @@
+// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+// source: layers.go
+
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/go-digest"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
+// MarshalJSON marshal bytes to json - template
+func (j *DiffOptions) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if j.Compression != nil {
+ buf.WriteString(`{"Compression":`)
+ fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0)
+ } else {
+ buf.WriteString(`{"Compression":null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtDiffOptionsbase = iota
+ ffjtDiffOptionsnosuchkey
+
+ ffjtDiffOptionsCompression
+)
+
+var ffjKeyDiffOptionsCompression = []byte("Compression")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *DiffOptions) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtDiffOptionsbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'C':
+
+ if bytes.Equal(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) {
+ currentKey = ffjtDiffOptionsCompression
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtDiffOptionsnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtDiffOptionsCompression:
+ goto handle_Compression
+
+ case ffjtDiffOptionsnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Compression:
+
+ /* handler: j.Compression type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.Compression = nil
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := archive.Compression(tval)
+ j.Compression = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *Layer) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{ "id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteByte(',')
+ if len(j.Names) != 0 {
+ buf.WriteString(`"names":`)
+ if j.Names != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.Names {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Parent) != 0 {
+ buf.WriteString(`"parent":`)
+ fflib.WriteJsonString(buf, string(j.Parent))
+ buf.WriteByte(',')
+ }
+ if len(j.Metadata) != 0 {
+ buf.WriteString(`"metadata":`)
+ fflib.WriteJsonString(buf, string(j.Metadata))
+ buf.WriteByte(',')
+ }
+ if len(j.MountLabel) != 0 {
+ buf.WriteString(`"mountlabel":`)
+ fflib.WriteJsonString(buf, string(j.MountLabel))
+ buf.WriteByte(',')
+ }
+ if true {
+ buf.WriteString(`"created":`)
+
+ {
+
+ obj, err = j.Created.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.CompressedDigest) != 0 {
+ buf.WriteString(`"compressed-diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.CompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.CompressedSize != 0 {
+ buf.WriteString(`"compressed-size":`)
+ fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UncompressedDigest) != 0 {
+ buf.WriteString(`"diff-digest":`)
+ fflib.WriteJsonString(buf, string(j.UncompressedDigest))
+ buf.WriteByte(',')
+ }
+ if j.UncompressedSize != 0 {
+ buf.WriteString(`"diff-size":`)
+ fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0)
+ buf.WriteByte(',')
+ }
+ if j.CompressionType != 0 {
+ buf.WriteString(`"compression":`)
+ fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0)
+ buf.WriteByte(',')
+ }
+ if len(j.UIDs) != 0 {
+ buf.WriteString(`"uidset":`)
+ if j.UIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDs) != 0 {
+ buf.WriteString(`"gidset":`)
+ if j.GIDs != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDs {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, false)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.Flags) != 0 {
+ buf.WriteString(`"flags":`)
+ /* Falling back. type=map[string]interface {} kind=map */
+ err = buf.Encode(j.Flags)
+ if err != nil {
+ return err
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.UIDMap) != 0 {
+ buf.WriteString(`"uidmap":`)
+ if j.UIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.UIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ if len(j.GIDMap) != 0 {
+ buf.WriteString(`"gidmap":`)
+ if j.GIDMap != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.GIDMap {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ /* Struct fall back. type=idtools.IDMap kind=struct */
+ err = buf.Encode(&v)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
+ buf.Rewind(1)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtLayerbase = iota
+ ffjtLayernosuchkey
+
+ ffjtLayerID
+
+ ffjtLayerNames
+
+ ffjtLayerParent
+
+ ffjtLayerMetadata
+
+ ffjtLayerMountLabel
+
+ ffjtLayerCreated
+
+ ffjtLayerCompressedDigest
+
+ ffjtLayerCompressedSize
+
+ ffjtLayerUncompressedDigest
+
+ ffjtLayerUncompressedSize
+
+ ffjtLayerCompressionType
+
+ ffjtLayerUIDs
+
+ ffjtLayerGIDs
+
+ ffjtLayerFlags
+
+ ffjtLayerUIDMap
+
+ ffjtLayerGIDMap
+)
+
+var ffjKeyLayerID = []byte("id")
+
+var ffjKeyLayerNames = []byte("names")
+
+var ffjKeyLayerParent = []byte("parent")
+
+var ffjKeyLayerMetadata = []byte("metadata")
+
+var ffjKeyLayerMountLabel = []byte("mountlabel")
+
+var ffjKeyLayerCreated = []byte("created")
+
+var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest")
+
+var ffjKeyLayerCompressedSize = []byte("compressed-size")
+
+var ffjKeyLayerUncompressedDigest = []byte("diff-digest")
+
+var ffjKeyLayerUncompressedSize = []byte("diff-size")
+
+var ffjKeyLayerCompressionType = []byte("compression")
+
+var ffjKeyLayerUIDs = []byte("uidset")
+
+var ffjKeyLayerGIDs = []byte("gidset")
+
+var ffjKeyLayerFlags = []byte("flags")
+
+var ffjKeyLayerUIDMap = []byte("uidmap")
+
+var ffjKeyLayerGIDMap = []byte("gidmap")
+
+// UnmarshalJSON umarshall json - template of ffjson
+func (j *Layer) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtLayerbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'd':
+
+ if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'g':
+
+ if bytes.Equal(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) {
+ currentKey = ffjtLayerGIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerUIDMap, kn) {
+ currentKey = ffjtLayerUIDMap
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) {
+ currentKey = ffjtLayerFlags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerGIDs, kn) {
+ currentKey = ffjtLayerGIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUIDs, kn) {
+ currentKey = ffjtLayerUIDs
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) {
+ currentKey = ffjtLayerCompressionType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) {
+ currentKey = ffjtLayerUncompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) {
+ currentKey = ffjtLayerUncompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) {
+ currentKey = ffjtLayerCompressedSize
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) {
+ currentKey = ffjtLayerCompressedDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) {
+ currentKey = ffjtLayerCreated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) {
+ currentKey = ffjtLayerMountLabel
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) {
+ currentKey = ffjtLayerMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) {
+ currentKey = ffjtLayerParent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffjKeyLayerNames, kn) {
+ currentKey = ffjtLayerNames
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) {
+ currentKey = ffjtLayerID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtLayernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtLayerID:
+ goto handle_ID
+
+ case ffjtLayerNames:
+ goto handle_Names
+
+ case ffjtLayerParent:
+ goto handle_Parent
+
+ case ffjtLayerMetadata:
+ goto handle_Metadata
+
+ case ffjtLayerMountLabel:
+ goto handle_MountLabel
+
+ case ffjtLayerCreated:
+ goto handle_Created
+
+ case ffjtLayerCompressedDigest:
+ goto handle_CompressedDigest
+
+ case ffjtLayerCompressedSize:
+ goto handle_CompressedSize
+
+ case ffjtLayerUncompressedDigest:
+ goto handle_UncompressedDigest
+
+ case ffjtLayerUncompressedSize:
+ goto handle_UncompressedSize
+
+ case ffjtLayerCompressionType:
+ goto handle_CompressionType
+
+ case ffjtLayerUIDs:
+ goto handle_UIDs
+
+ case ffjtLayerGIDs:
+ goto handle_GIDs
+
+ case ffjtLayerFlags:
+ goto handle_Flags
+
+ case ffjtLayerUIDMap:
+ goto handle_UIDMap
+
+ case ffjtLayerGIDMap:
+ goto handle_GIDMap
+
+ case ffjtLayernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Names:
+
+ /* handler: j.Names type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Names = nil
+ } else {
+
+ j.Names = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJNames string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJNames type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJNames = string(string(outBuf))
+
+ }
+ }
+
+ j.Names = append(j.Names, tmpJNames)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Parent:
+
+ /* handler: j.Parent type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Parent = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: j.Metadata type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Metadata = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountLabel:
+
+ /* handler: j.MountLabel type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountLabel = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Created:
+
+ /* handler: j.Created type=time.Time kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = j.Created.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedDigest:
+
+ /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.CompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressedSize:
+
+ /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedDigest:
+
+ /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.UncompressedDigest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UncompressedSize:
+
+ /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.UncompressedSize = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CompressionType:
+
+ /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.CompressionType = archive.Compression(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDs:
+
+ /* handler: j.UIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDs = nil
+ } else {
+
+ j.UIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJUIDs = uint32(tval)
+
+ }
+ }
+
+ j.UIDs = append(j.UIDs, tmpJUIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDs:
+
+ /* handler: j.GIDs type=[]uint32 kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDs = nil
+ } else {
+
+ j.GIDs = []uint32{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDs uint32
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDs type=uint32 kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmpJGIDs = uint32(tval)
+
+ }
+ }
+
+ j.GIDs = append(j.GIDs, tmpJGIDs)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Flags:
+
+ /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.Flags = nil
+ } else {
+
+ j.Flags = make(map[string]interface{}, 0)
+
+ wantVal := true
+
+ for {
+
+ var k string
+
+ var tmpJFlags interface{}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: k type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ k = string(string(outBuf))
+
+ }
+ }
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/
+
+ {
+ /* Falling back. type=interface {} kind=interface */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJFlags)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.Flags[k] = tmpJFlags
+
+ wantVal = false
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UIDMap:
+
+ /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.UIDMap = nil
+ } else {
+
+ j.UIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJUIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJUIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.UIDMap = append(j.UIDMap, tmpJUIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GIDMap:
+
+ /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.GIDMap = nil
+ } else {
+
+ j.GIDMap = []idtools.IDMap{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJGIDMap idtools.IDMap
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/
+
+ {
+ /* Falling back. type=idtools.IDMap kind=struct */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &tmpJGIDMap)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+
+ j.GIDMap = append(j.GIDMap, tmpJGIDMap)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerMountPoint) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"id":`)
+ fflib.WriteJsonString(buf, string(j.ID))
+ buf.WriteString(`,"path":`)
+ fflib.WriteJsonString(buf, string(j.MountPoint))
+ buf.WriteString(`,"count":`)
+ fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0)
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffjtlayerMountPointbase = iota
+ ffjtlayerMountPointnosuchkey
+
+ ffjtlayerMountPointID
+
+ ffjtlayerMountPointMountPoint
+
+ ffjtlayerMountPointMountCount
+)
+
+var ffjKeylayerMountPointID = []byte("id")
+
+var ffjKeylayerMountPointMountPoint = []byte("path")
+
+var ffjKeylayerMountPointMountCount = []byte("count")
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerMountPoint) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerMountPointbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) {
+ currentKey = ffjtlayerMountPointMountCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) {
+ currentKey = ffjtlayerMountPointMountPoint
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) {
+ currentKey = ffjtlayerMountPointID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffjtlayerMountPointnosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerMountPointID:
+ goto handle_ID
+
+ case ffjtlayerMountPointMountPoint:
+ goto handle_MountPoint
+
+ case ffjtlayerMountPointMountCount:
+ goto handle_MountCount
+
+ case ffjtlayerMountPointnosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ID:
+
+ /* handler: j.ID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.ID = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountPoint:
+
+ /* handler: j.MountPoint type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.MountPoint = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MountCount:
+
+ /* handler: j.MountCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ j.MountCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *layerStore) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtlayerStorebase = iota
+ ffjtlayerStorenosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *layerStore) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtlayerStorebase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtlayerStorenosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtlayerStorenosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
+
+// MarshalJSON marshal bytes to json - template
+func (j *simpleGetCloser) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if j == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := j.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshal buff to json - template
+func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if j == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{}`)
+ return nil
+}
+
+const (
+ ffjtsimpleGetCloserbase = iota
+ ffjtsimpleGetClosernosuchkey
+)
+
+// UnmarshalJSON unmarshals json - template of ffjson
+func (j *simpleGetCloser) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+// UnmarshalJSONFFLexer fast json unmarshall - template ffjson
+func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjtsimpleGetCloserbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ }
+
+ currentKey = ffjtsimpleGetClosernosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffjtsimpleGetClosernosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+
+ return nil
+}
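The generated layerMountPoint marshaler above writes exactly three keys, "id", "path" and "count", and the matching lexer-driven unmarshaler skips any key it does not recognize via SkipField. A minimal sketch of the same wire format using plain encoding/json; the mirror struct and the sample values are illustrative only and are not part of the vendored code:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // mountPoint mirrors the unexported layerMountPoint fields, for illustration only.
    type mountPoint struct {
        ID         string `json:"id"`
        MountPoint string `json:"path"`
        MountCount int    `json:"count"`
    }

    func main() {
        b, _ := json.Marshal(mountPoint{ID: "layer1", MountPoint: "/tmp/layer1", MountCount: 1})
        fmt.Println(string(b)) // {"id":"layer1","path":"/tmp/layer1","count":1}

        var m mountPoint
        _ = json.Unmarshal([]byte(`{"id":"layer1","path":"/tmp/layer1","count":2,"extra":true}`), &m)
        fmt.Println(m.MountCount) // 2; the unknown "extra" key is ignored, matching the generated code
    }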
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
index c001fbecb..d28ba9d69 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
@@ -1,23 +1,96 @@
-// +build linux
-
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
+ "errors"
"os"
-
- "github.com/containers/storage/pkg/idtools"
+ "path/filepath"
+ "strings"
)
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
- uid := os.Getuid()
- usr, err := idtools.LookupUID(uid)
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+ return xdgRuntimeDir, nil
+ }
+ return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files whose sticky bit it set.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ runtimeDir, err := GetRuntimeDir()
if err != nil {
- return "", err
+ // ignore error if runtimeDir is empty
+ return nil, nil
+ }
+ runtimeDir, err = filepath.Abs(runtimeDir)
+ if err != nil {
+ return nil, err
+ }
+ var sticked []string
+ for _, f := range files {
+ f, err = filepath.Abs(f)
+ if err != nil {
+ return sticked, err
+ }
+ if strings.HasPrefix(f, runtimeDir+"/") {
+ if err = stick(f); err != nil {
+ return sticked, err
+ }
+ sticked = append(sticked, f)
+ }
+ }
+ return sticked, nil
+}
+
+func stick(f string) error {
+ st, err := os.Stat(f)
+ if err != nil {
+ return err
+ }
+ m := st.Mode()
+ m |= os.ModeSticky
+ return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
- return usr.Home, nil
+ return filepath.Join(home, ".config"), nil
}
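The rewritten homedir package now exposes the XDG lookups directly: GetRuntimeDir returns an error when XDG_RUNTIME_DIR is unset, while GetDataHome and GetConfigHome fall back to $HOME/.local/share and $HOME/.config respectively. A minimal usage sketch (the printed messages are illustrative):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/homedir"
    )

    func main() {
        if runtimeDir, err := homedir.GetRuntimeDir(); err == nil {
            fmt.Println("runtime dir:", runtimeDir)
        } else {
            // Typically means the process is not running under a pam_systemd session.
            fmt.Println("no XDG_RUNTIME_DIR:", err)
        }

        dataHome, _ := homedir.GetDataHome()     // XDG_DATA_HOME, or $HOME/.local/share
        configHome, _ := homedir.GetConfigHome() // XDG_CONFIG_HOME, or $HOME/.config
        fmt.Println(dataHome, configHome)
    }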
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index 6b96b856f..f7bcfb878 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -2,12 +2,29 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"errors"
)
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
- return "", errors.New("homedir.GetStatic() is not supported on this system")
+// GetRuntimeDir is unsupported on non-linux system.
+func GetRuntimeDir() (string, error) {
+ return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-linux system.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
+
+// GetDataHome is unsupported on non-linux system.
+func GetDataHome() (string, error) {
+ return "", errors.New("homedir.GetDataHome() is not supported on this system")
+}
+
+// GetConfigHome is unsupported on non-linux system.
+func GetConfigHome() (string, error) {
+ return "", errors.New("homedir.GetConfigHome() is not supported on this system")
}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index f2a20ea8f..dcadb7e8d 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -2,10 +2,12 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"os"
-
- "github.com/opencontainers/runc/libcontainer/user"
+ "os/user"
)
// Key returns the env var name for the user's home dir based on
@@ -17,11 +19,16 @@ func Key() string {
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
home := os.Getenv(Key())
if home == "" {
- if u, err := user.CurrentUser(); err == nil {
- return u.Home
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
}
}
return home
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
index fafdb2bbf..4f2615ed3 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
@@ -1,5 +1,8 @@
package homedir
+// Copyright 2013-2018 Docker, Inc.
+// NOTE: this package has originally been copied from github.com/docker/docker.
+
import (
"os"
)
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 1ae728a61..372bee732 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -5,9 +5,6 @@ package reexec
import (
"context"
"os/exec"
- "syscall"
-
- "golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
@@ -16,28 +13,20 @@ func Self() string {
return "/proc/self/exe"
}
-// Command returns *exec.Cmd which has Path as current binary. Also it setting
-// SysProcAttr.Pdeathsig to SIGTERM.
+// Command returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
cmd := exec.Command(Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
-// CommandContext returns *exec.Cmd which has Path as current binary, and also
-// sets SysProcAttr.Pdeathsig to SIGTERM.
+// CommandContext returns *exec.Cmd which has Path as current binary.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- }
return cmd
}
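With this change Command and CommandContext no longer set SysProcAttr.Pdeathsig, so re-exec'd children are not killed automatically when the parent dies. A caller that still wants the old behavior can opt back in itself; a sketch assuming Linux, golang.org/x/sys/unix, and a hypothetical registered entry point named "my-init":

    package main

    import (
        "log"
        "syscall"

        "github.com/containers/storage/pkg/reexec"
        "golang.org/x/sys/unix"
    )

    func main() {
        // "my-init" is a hypothetical entry point registered elsewhere via reexec.Register.
        cmd := reexec.Command("my-init")
        // Restore the removed behavior: have the kernel deliver SIGTERM to the child
        // if this parent process exits first.
        cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: unix.SIGTERM}
        if err := cmd.Run(); err != nil {
            log.Fatal(err)
        }
    }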
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index b84be4424..d978c476d 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/storage/pkg/archive"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
@@ -3275,9 +3276,9 @@ const defaultConfigFile = "/etc/containers/storage.conf"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
- home, err := homeDir()
- if err != nil {
- return "", errors.Wrapf(err, "cannot determine users homedir")
+ home := homedir.Get()
+ if home == "" {
+ return "", errors.New("cannot determine user's homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
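For rootless users DefaultConfigFile now resolves the home directory with homedir.Get() (environment first, then os/user) instead of the removed homeDir() helper, so the rootless path remains $HOME/.config/containers/storage.conf. A small illustrative call; the printed path depends on the caller's environment:

    package main

    import (
        "fmt"
        "log"

        "github.com/containers/storage"
    )

    func main() {
        path, err := storage.DefaultConfigFile(true) // true = rootless
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(path) // e.g. /home/alice/.config/containers/storage.conf
    }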
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 28e0a8d6d..7e4b27d0f 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -4,12 +4,12 @@ import (
"fmt"
"os"
"os/exec"
- "os/user"
"path/filepath"
"strconv"
"strings"
"github.com/BurntSushi/toml"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -82,9 +82,8 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
}
func getRootlessRuntimeDir(rootlessUID int) (string, error) {
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
-
- if runtimeDir != "" {
+ runtimeDir, err := homedir.GetRuntimeDir()
+ if err == nil {
return runtimeDir, nil
}
tmpDir := fmt.Sprintf("/run/user/%d", rootlessUID)
@@ -98,8 +97,8 @@ func getRootlessRuntimeDir(rootlessUID int) (string, error) {
} else {
return tmpDir, nil
}
- home, err := homeDir()
- if err != nil {
+ home := homedir.Get()
+ if home == "" {
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
}
resolvedHome, err := filepath.EvalSymlinks(home)
@@ -117,20 +116,23 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) {
return "", "", err
}
- dataDir := os.Getenv("XDG_DATA_HOME")
- if dataDir == "" {
- home, err := homeDir()
- if err != nil {
- return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
- }
- // runc doesn't like symlinks in the rootfs path, and at least
- // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- dataDir = filepath.Join(resolvedHome, ".local", "share")
+ dataDir, err := homedir.GetDataHome()
+ if err == nil {
+ return dataDir, rootlessRuntime, nil
+ }
+
+ home := homedir.Get()
+ if home == "" {
+ return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "cannot resolve %s", home)
}
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+
return dataDir, rootlessRuntime, nil
}
@@ -246,15 +248,3 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
}
return storageOpts, nil
}
-
-func homeDir() (string, error) {
- home := os.Getenv("HOME")
- if home == "" {
- usr, err := user.Current()
- if err != nil {
- return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- home = usr.HomeDir
- }
- return home, nil
-}
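The rootless helpers now delegate to the homedir package as well: the runtime directory is resolved from XDG_RUNTIME_DIR first, then /run/user/<uid> if it is usable, and finally a location derived from $HOME; the data directory comes from XDG_DATA_HOME with a $HOME/.local/share fallback. A short sketch using the exported entry point:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/containers/storage"
    )

    func main() {
        dir, err := storage.GetRootlessRuntimeDir(os.Getuid())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(dir) // e.g. /run/user/1000, depending on the environment
    }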
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index b0a470f92..3d2fdcd77 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -42,10 +42,10 @@ const (
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
- bTableBits = 18 // Bits used in the big tables
- bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxMatchOffset * 10 // Size to preallocate for history.
- bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
+ bTableBits = 18 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 20 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
const (
@@ -210,16 +210,14 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
// Reset the encoding table.
func (e *fastGen) Reset() {
- if cap(e.hist) < int(maxMatchOffset*8) {
- l := maxMatchOffset * 8
- // Make it at least 1MB.
- if l < 1<<20 {
- l = 1 << 20
- }
- e.hist = make([]byte, 0, l)
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
}
- // We offset current position so everything will be out of reach
- e.cur += maxMatchOffset + int32(len(e.hist))
e.hist = e.hist[:0]
}
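The history preallocation is now tied to the stored-block size rather than the match offset, and Reset only advances e.cur while it is at or below bufferReset, which is what the new e.cur < 0 debug panics in the level encoders guard against. Recomputing the constants, under the assumption that maxStoreBlockSize is 65535 (the DEFLATE stored-block limit defined elsewhere in the package):

    package main

    import "fmt"

    // Illustrative recomputation only; these names shadow the package's own constants.
    const (
        maxStoreBlockSize = 65535
        allocHistory      = maxStoreBlockSize * 20
        bufferReset       = (1 << 31) - allocHistory - maxStoreBlockSize - 1
    )

    func main() {
        fmt.Println(allocHistory) // 1310700 bytes preallocated for history
        fmt.Println(bufferReset)  // 2146107412, the offset at which the buffer is reset
    }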
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 9feea87a3..56ee6dc8b 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -177,6 +177,11 @@ func (w *huffmanBitWriter) flush() {
w.nbits = 0
return
}
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
n := w.nbytes
for w.nbits != 0 {
w.bytes[n] = byte(w.bits)
@@ -594,8 +599,8 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
tokens.AddEOB()
}
- // We cannot reuse pure huffman table.
- if w.lastHuffMan && w.lastHeader > 0 {
+ // We cannot reuse pure huffman table, and must mark as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
// We will not try to reuse.
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 20de8f11f..102fc74c7 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -14,6 +16,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 7c824431e..dc6b1d314 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
@@ -16,6 +18,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index 4153d24c9..1a3ff9b6b 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -1,5 +1,7 @@
package flate
+import "fmt"
+
// fastEncL3
type fastEncL3 struct {
fastGen
@@ -13,6 +15,10 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
index c689ac771..f3ecc9c4d 100644
--- a/vendor/github.com/klauspost/compress/flate/level4.go
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -13,7 +13,9 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
-
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 14a235612..4e3916825 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -13,6 +13,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index cad0c7df7..00a311977 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -13,6 +13,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
+ if debugDecode && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index a47051197..53e899124 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -8,6 +8,8 @@ import (
const (
maxStatelessBlock = math.MaxInt16
+	// the dictionary is counted against maxStatelessBlock, so limit its size.
+ maxStatelessDict = 8 << 10
slTableBits = 13
slTableSize = 1 << slTableBits
@@ -25,11 +27,11 @@ func (s *statelessWriter) Close() error {
}
s.closed = true
// Emit EOF block
- return StatelessDeflate(s.dst, nil, true)
+ return StatelessDeflate(s.dst, nil, true, nil)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false)
+ err = StatelessDeflate(s.dst, p, false, nil)
if err != nil {
return 0, err
}
@@ -59,7 +61,10 @@ var bitWriterPool = sync.Pool{
// StatelessDeflate allows to compress directly to a Writer without retaining state.
// When returning everything will be flushed.
-func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
@@ -76,35 +81,53 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
return bw.err
}
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
for len(in) > 0 {
todo := in
- if len(todo) > maxStatelessBlock {
- todo = todo[:maxStatelessBlock]
+ if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
}
in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
// Compress
- statelessEnc(&dst, todo)
+ statelessEnc(&dst, todo, int16(len(dict)))
isEof := eof && len(in) == 0
if dst.n == 0 {
- bw.writeStoredHeader(len(todo), isEof)
+ bw.writeStoredHeader(len(uncompressed), isEof)
if bw.err != nil {
return bw.err
}
- bw.writeBytes(todo)
- } else if int(dst.n) > len(todo)-len(todo)>>4 {
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
// If we removed less than 1/16th, huffman compress the block.
- bw.writeBlockHuff(isEof, todo, false)
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
- bw.writeBlockDynamic(&dst, isEof, todo, false)
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ dict = todo[len(todo)-maxStatelessDict:]
+ dst.Reset()
}
if bw.err != nil {
return bw.err
}
- dst.Reset()
}
if !eof {
- // Align.
+ // Align, only a stored block can do that.
bw.writeStoredHeader(0, false)
}
bw.flush()
@@ -130,7 +153,7 @@ func load6416(b []byte, i int16) uint64 {
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
-func statelessEnc(dst *tokens, src []byte) {
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
@@ -144,15 +167,23 @@ func statelessEnc(dst *tokens, src []byte) {
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
- dst.n = uint16(len(src))
+ dst.n = 0
return
}
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
- s := int16(1)
- nextEmit := int16(0)
+ s := startAt + 1
+ nextEmit := startAt
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
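StatelessDeflate gains a fourth parameter for an optional preset dictionary, truncated to the last maxStatelessDict (8 KiB) bytes; the output can be read back with flate.NewReaderDict and the same dictionary. A minimal round-trip sketch of the new signature:

    package main

    import (
        "bytes"
        "io/ioutil"
        "log"

        "github.com/klauspost/compress/flate"
    )

    func main() {
        dict := []byte("a shared prefix both sides already know")
        payload := []byte("a shared prefix both sides already know, plus the new data")

        var compressed bytes.Buffer
        // eof=true emits the final block; passing a nil dict is also valid.
        if err := flate.StatelessDeflate(&compressed, payload, true, dict); err != nil {
            log.Fatal(err)
        }

        // Decompress with the same preset dictionary.
        out, err := ioutil.ReadAll(flate.NewReaderDict(&compressed, dict))
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("round-tripped %d bytes", len(out))
    }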
diff --git a/vendor/github.com/mattn/go-shellwords/util_go15.go b/vendor/github.com/mattn/go-shellwords/util_go15.go
deleted file mode 100644
index ddcbf229e..000000000
--- a/vendor/github.com/mattn/go-shellwords/util_go15.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !go1.6
-
-package shellwords
-
-import (
- "os"
- "os/exec"
- "runtime"
- "strings"
-)
-
-func shellRun(line, dir string) (string, error) {
- var b []byte
- var err error
- var cmd *exec.Cmd
- if runtime.GOOS == "windows" {
- cmd = exec.Command(os.Getenv("COMSPEC"), "/c", line)
- } else {
- cmd = exec.Command(os.Getenv("SHELL"), "-c", line)
- }
- if dir != "" {
- cmd.Dir = dir
- }
- b, err = cmd.Output()
- if err != nil {
- return "", err
- }
- return strings.TrimSpace(string(b)), nil
-}
diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go
index 3aef2c4d7..988fc9ed2 100644
--- a/vendor/github.com/mattn/go-shellwords/util_posix.go
+++ b/vendor/github.com/mattn/go-shellwords/util_posix.go
@@ -1,4 +1,4 @@
-// +build !windows,go1.6
+// +build !windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("SHELL")
+ var shell string
+ if shell = os.Getenv("SHELL"); shell == "" {
+ shell = "/bin/sh"
+ }
cmd := exec.Command(shell, "-c", line)
if dir != "" {
cmd.Dir = dir
diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go
index cda685091..20546737c 100644
--- a/vendor/github.com/mattn/go-shellwords/util_windows.go
+++ b/vendor/github.com/mattn/go-shellwords/util_windows.go
@@ -1,4 +1,4 @@
-// +build windows,go1.6
+// +build windows
package shellwords
@@ -10,7 +10,10 @@ import (
)
func shellRun(line, dir string) (string, error) {
- shell := os.Getenv("COMSPEC")
+ var shell string
+ if shell = os.Getenv("COMSPEC"); shell == "" {
+ shell = "cmd"
+ }
cmd := exec.Command(shell, "/c", line)
if dir != "" {
cmd.Dir = dir
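shellRun now falls back to /bin/sh (POSIX) or cmd (Windows) when SHELL or COMSPEC is unset; it is only reached when backtick substitution is enabled on the parser. A small sketch, assuming the usual go-shellwords parser options:

    package main

    import (
        "fmt"
        "log"

        shellwords "github.com/mattn/go-shellwords"
    )

    func main() {
        p := shellwords.NewParser()
        p.ParseBacktick = true // backtick substitution is executed through shellRun
        args, err := p.Parse("echo `echo hello` world")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(args) // [echo hello world]
    }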
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index e0364e9e7..bf89ecd21 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -32,7 +32,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -160,7 +161,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
return False(t, value, append([]interface{}{msg}, args...)...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -267,7 +269,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -325,14 +327,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
-}
-
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() not accept.
//
@@ -369,6 +363,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
@@ -379,6 +384,15 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool
return Nil(t, object, append([]interface{}{msg}, args...)...)
}
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -392,6 +406,15 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
return NoError(t, err, append([]interface{}{msg}, args...)...)
}
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -462,6 +485,19 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -491,6 +527,18 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool
return Panics(t, f, append([]interface{}{msg}, args...)...)
}
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -557,6 +605,14 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
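The hunks above add several assertions to testify's formatted (...f) API: Neverf, NoDirExistsf, NoFileExistsf, NotSamef, PanicsWithErrorf, and the relocated YAMLEqf. A hedged usage sketch of the additions pulled in by this vendor bump (the test name, paths, and values are made up):

    package example_test

    import (
        "errors"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestNewFormattedAssertions(t *testing.T) {
        // Negative existence checks.
        assert.NoDirExistsf(t, "/no/such/dir", "directory %q must not exist", "/no/such/dir")
        assert.NoFileExistsf(t, "/no/such/file", "file must be absent")

        // NotSamef: two distinct allocations are never the same pointer.
        a, b := new(int), new(int)
        assert.NotSamef(t, a, b, "expected distinct pointers")

        // PanicsWithErrorf compares the recovered error via EqualError.
        assert.PanicsWithErrorf(t, "boom", func() { panic(errors.New("boom")) }, "unexpected panic value")

        // Neverf: the condition must stay false for the whole waitFor window.
        assert.Neverf(t, func() bool { return false }, 50*time.Millisecond, 10*time.Millisecond, "condition held")

        // YAMLEqf moved in the generated output but keeps its behaviour.
        assert.YAMLEqf(t, "a: 1\n", "a: 1\n", "YAML documents differ")
    }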
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 26830403a..75ecdcaa2 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -53,7 +53,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
return Containsf(a.t, s, contains, msg, args...)
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -61,7 +62,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
return DirExists(a.t, path, msgAndArgs...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -309,7 +311,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
return Falsef(a.t, value, msg, args...)
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -317,7 +320,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
return FileExists(a.t, path, msgAndArgs...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -521,7 +525,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other.
//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -563,7 +567,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -639,22 +643,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
return JSONEqf(a.t, expected, actual, msg, args...)
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- return YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -727,6 +715,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...)
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@@ -747,6 +757,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b
return Nilf(a.t, object, msg, args...)
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -773,6 +801,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
return NoErrorf(a.t, err, msg, args...)
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -913,6 +959,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
return NotRegexpf(a.t, rx, str, msg, args...)
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -961,6 +1033,30 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1103,6 +1199,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
// Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
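The same additions are mirrored on the *Assertions forwarder, so they are also available on a handle created with assert.New(t). A brief, illustrative sketch:

    package example_test

    import (
        "errors"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestForwarder(t *testing.T) {
        a := assert.New(t)
        a.NoDirExists("/no/such/dir") // forwards to assert.NoDirExists(a.t, ...)
        a.NotSame(new(int), new(int))
        a.PanicsWithError("boom", func() { panic(errors.New("boom")) })
    }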
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 044da8b01..bdd81389a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -11,6 +11,7 @@ import (
"reflect"
"regexp"
"runtime"
+ "runtime/debug"
"strings"
"time"
"unicode"
@@ -21,7 +22,7 @@ import (
yaml "gopkg.in/yaml.v2"
)
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
@@ -351,6 +352,19 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
}
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
// Same asserts that two pointers reference the same object.
//
// assert.Same(t, ptr1, ptr2)
@@ -362,18 +376,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper()
}
- expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual)
- if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
- return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
- }
-
- expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
- if expectedType != actualType {
- return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
- expectedType, actualType), msgAndArgs...)
- }
-
- if expected != actual {
+ if !samePointers(expected, actual) {
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@@ -382,6 +385,42 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
return true
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
+
// formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user.
//
@@ -393,9 +432,11 @@ func formatUnequalValues(expected, actual interface{}) (e string, a string) {
return fmt.Sprintf("%T(%#v)", expected, expected),
fmt.Sprintf("%T(%#v)", actual, actual)
}
-
- return fmt.Sprintf("%#v", expected),
- fmt.Sprintf("%#v", actual)
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
}
// EqualValues asserts that two objects are equal or convertable to the same types
@@ -901,15 +942,17 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}) {
+func didPanic(f PanicTestFunc) (bool, interface{}, string) {
didPanic := false
var message interface{}
+ var stack string
func() {
defer func() {
if message = recover(); message != nil {
didPanic = true
+ stack = string(debug.Stack())
}
}()
@@ -918,7 +961,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) {
}()
- return didPanic, message
+ return didPanic, message, stack
}
@@ -930,7 +973,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper()
}
- if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
}
@@ -946,12 +989,34 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr
h.Helper()
}
- funcDidPanic, panicValue := didPanic(f)
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
if !funcDidPanic {
return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
}
if panicValue != expected {
- return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
}
return true
@@ -965,8 +1030,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
h.Helper()
}
- if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
- return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
}
return true
@@ -1026,7 +1091,7 @@ func toFloat(x interface{}) (float64, bool) {
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1314,7 +1379,8 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
return true
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1332,7 +1398,24 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1350,6 +1433,25 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
return true
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return true
+ }
+ return true
+ }
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
// JSONEq asserts that two JSON strings are equivalent.
//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@@ -1439,15 +1541,6 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff
}
-// validateEqualArgs checks whether provided arguments can be safely used in the
-// Equal/NotEqual functions.
-func validateEqualArgs(expected, actual interface{}) error {
- if isFunction(expected) || isFunction(actual) {
- return errors.New("cannot take func type as argument")
- }
- return nil
-}
-
func isFunction(arg interface{}) bool {
if arg == nil {
return false
@@ -1475,24 +1568,59 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
h.Helper()
}
+ ch := make(chan bool, 1)
+
timer := time.NewTimer(waitFor)
- ticker := time.NewTicker(tick)
- checkPassed := make(chan bool)
defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
defer ticker.Stop()
- defer close(checkPassed)
- for {
+
+ for tick := ticker.C; ; {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case result := <-checkPassed:
- if result {
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
return true
}
- case <-ticker.C:
- go func() {
- checkPassed <- condition()
- }()
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
}
}
}
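The rewritten Eventually loop and the new Never share one polling pattern: the ticker case is disabled (its channel set to nil) while a condition check is in flight, so checks never overlap, and a buffered channel carries the result back. A standalone sketch of that pattern under made-up names (pollOnce is not testify API):

    package main

    import (
        "fmt"
        "time"
    )

    // pollOnce reports whether condition becomes true within waitFor,
    // checking at most once per tick and never concurrently.
    func pollOnce(condition func() bool, waitFor, tick time.Duration) bool {
        ch := make(chan bool, 1)

        timer := time.NewTimer(waitFor)
        defer timer.Stop()

        ticker := time.NewTicker(tick)
        defer ticker.Stop()

        for tickC := ticker.C; ; {
            select {
            case <-timer.C:
                return false // condition never became true within waitFor
            case <-tickC:
                tickC = nil // a nil channel blocks, disabling this case while a check runs
                go func() { ch <- condition() }()
            case ok := <-ch:
                if ok {
                    return true
                }
                tickC = ticker.C // re-enable ticking for the next check
            }
        }
    }

    func main() {
        start := time.Now()
        ok := pollOnce(func() bool { return time.Since(start) > 30*time.Millisecond },
            200*time.Millisecond, 10*time.Millisecond)
        fmt.Println("condition eventually true:", ok)
    }

Never is the mirror image: reaching the timer deadline is success, and a true condition result is a failure.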
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
index 9ad56851d..df189d234 100644
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
index ac71d4058..1dcb2338c 100644
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index c5903f5db..cf6c7b566 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -66,7 +66,8 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args
t.FailNow()
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -77,7 +78,8 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow()
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -275,12 +277,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) {
//
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
- if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
- return
- }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) {
+ return
+ }
t.FailNow()
}
@@ -289,12 +291,12 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
//
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
- if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
- return
- }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) {
+ return
+ }
t.FailNow()
}
@@ -394,7 +396,8 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
t.FailNow()
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -405,7 +408,8 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
t.FailNow()
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -660,7 +664,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -717,7 +721,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -820,28 +824,6 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
t.FailNow()
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
- return
- }
- t.FailNow()
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if assert.YAMLEqf(t, expected, actual, msg, args...) {
- return
- }
- t.FailNow()
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -932,6 +914,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
t.FailNow()
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Never(t, condition, waitFor, tick, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Neverf(t, condition, waitFor, tick, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Nil asserts that the specified object is nil.
//
// assert.Nil(t, err)
@@ -958,6 +968,30 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
t.FailNow()
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoDirExists(t, path, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoDirExistsf(t, path, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -990,6 +1024,30 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
t.FailNow()
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoFileExists(t, path, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NoFileExistsf(t, path, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -1166,6 +1224,38 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
t.FailNow()
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotSame(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotSamef(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -1229,6 +1319,36 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
t.FailNow()
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.PanicsWithError(t, errString, f, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.PanicsWithErrorf(t, errString, f, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1410,6 +1530,28 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
t.FailNow()
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.YAMLEqf(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// Zero asserts that i is the zero value for its type.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
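The require package gains the same set of assertions; unlike assert, these call t.FailNow() on failure and abort the test immediately. The Eventually/Eventuallyf hunks above also move the h.Helper() call ahead of the assertion so failure lines point at the caller. A hedged usage sketch (values are illustrative):

    package example_test

    import (
        "errors"
        "testing"
        "time"

        "github.com/stretchr/testify/require"
    )

    func TestRequireAdditions(t *testing.T) {
        require.PanicsWithError(t, "boom", func() { panic(errors.New("boom")) })
        require.Never(t, func() bool { return false }, 50*time.Millisecond, 10*time.Millisecond)
        require.NoFileExists(t, "/no/such/file")
        require.YAMLEq(t, "a: 1\n", "a: 1\n")
        // Any failure above stops the test via t.FailNow().
    }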
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 804fae035..5aac226df 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -54,7 +54,8 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string,
Containsf(a.t, s, contains, msg, args...)
}
-// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -62,7 +63,8 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
DirExists(a.t, path, msgAndArgs...)
}
-// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists.
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather a directory or there is an error checking whether it exists.
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -310,7 +312,8 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
Falsef(a.t, value, msg, args...)
}
-// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -318,7 +321,8 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
FileExists(a.t, path, msgAndArgs...)
}
-// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -522,7 +526,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}
// InDelta asserts that the two numerals are within delta of each other.
//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -564,7 +568,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01)
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -640,22 +644,6 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
JSONEqf(a.t, expected, actual, msg, args...)
}
-// YAMLEq asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// YAMLEqf asserts that two YAML strings are equivalent.
-func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
- if h, ok := a.t.(tHelper); ok {
- h.Helper()
- }
- YAMLEqf(a.t, expected, actual, msg, args...)
-}
-
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
@@ -728,6 +716,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
Lessf(a.t, e1, e2, msg, args...)
}
+// Never asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition doesn't satisfy in waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@@ -748,6 +758,24 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
Nilf(a.t, object, msg, args...)
}
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoDirExistsf(a.t, path, msg, args...)
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
@@ -774,6 +802,24 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
NoErrorf(a.t, err, msg, args...)
}
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NoFileExistsf(a.t, path, msg, args...)
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
@@ -914,6 +960,32 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg
NotRegexpf(a.t, rx, str, msg, args...)
}
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotSamef(a.t, expected, actual, msg, args...)
+}
+
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
@@ -962,6 +1034,30 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
@@ -1104,6 +1200,22 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta
WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
// Zero asserts that i is the zero value for its type.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
index 6b85c5ece..91772dfeb 100644
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ b/vendor/github.com/stretchr/testify/require/requirements.go
@@ -26,4 +26,4 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{})
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})
-//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 814ea264e..9fd805077 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -141,7 +141,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.15.8
+# github.com/containers/storage v1.16.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -313,7 +313,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.9.8
+# github.com/klauspost/compress v1.10.0
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -324,7 +324,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-shellwords v1.0.9
+# github.com/mattn/go-shellwords v1.0.10
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil
@@ -470,7 +470,7 @@ github.com/sirupsen/logrus/hooks/syslog
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
-# github.com/stretchr/testify v1.4.0
+# github.com/stretchr/testify v1.5.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2