-rw-r--r--  .cirrus.yml | 3
-rw-r--r--  CONTRIBUTING.md | 11
-rw-r--r--  cmd/podman/containers/ps.go | 8
-rw-r--r--  cmd/podman/inspect/inspect.go | 21
-rw-r--r--  cmd/podman/manifest/manifest.go | 2
-rw-r--r--  cmd/podman/manifest/push.go | 2
-rw-r--r--  cmd/podman/networks/list.go | 4
-rw-r--r--  cmd/podman/play/kube.go | 5
-rw-r--r--  cmd/podman/system/service_abi.go | 27
-rw-r--r--  contrib/gate/Dockerfile | 41
-rw-r--r--  contrib/gate/README.md | 6
-rwxr-xr-x  contrib/gate/entrypoint.sh | 23
-rw-r--r--  docs/source/markdown/podman-play-kube.1.md | 4
-rw-r--r--  docs/tutorials/image_signing.md | 26
-rw-r--r--  docs/tutorials/mac_win_client.md | 4
-rw-r--r--  docs/tutorials/podman_tutorial.md | 14
-rw-r--r--  docs/tutorials/remote_client.md | 18
-rw-r--r--  docs/tutorials/rootless_tutorial.md | 8
-rw-r--r--  libpod/boltdb_state.go | 18
-rw-r--r--  libpod/container.go | 14
-rw-r--r--  libpod/define/pod_inspect.go | 2
-rw-r--r--  libpod/filters/containers.go | 163
-rw-r--r--  libpod/network/create.go | 43
-rw-r--r--  libpod/networking_linux.go | 138
-rw-r--r--  libpod/pod_api.go | 2
-rw-r--r--  pkg/api/handlers/compat/containers_prune.go | 14
-rw-r--r--  pkg/api/handlers/compat/networks.go | 63
-rw-r--r--  pkg/api/handlers/libpod/play.go | 5
-rw-r--r--  pkg/api/server/register_play.go | 5
-rw-r--r--  pkg/bindings/play/play.go | 5
-rw-r--r--  pkg/domain/entities/play.go | 2
-rw-r--r--  pkg/domain/infra/abi/containers.go | 12
-rw-r--r--  pkg/domain/infra/abi/network.go | 6
-rw-r--r--  pkg/domain/infra/abi/play.go | 26
-rw-r--r--  pkg/domain/infra/runtime_libpod.go | 24
-rw-r--r--  pkg/ps/ps.go | 16
-rw-r--r--  pkg/util/utils.go | 12
-rw-r--r--  test/apiv2/rest_api/test_rest_v2_0_0.py | 14
-rw-r--r--  test/e2e/play_kube_test.go | 15
-rw-r--r--  test/e2e/pod_inspect_test.go | 19
-rw-r--r--  test/e2e/pod_pod_namespaces_test.go (renamed from test/e2e/pod_pod_namespaces.go) | 0
-rw-r--r--  test/e2e/ps_test.go | 122
-rw-r--r--  test/e2e/run_seccomp_test.go (renamed from test/e2e/run_seccomp.go) | 0
-rw-r--r--  test/e2e/run_security_labels_test.go (renamed from test/e2e/run_security_labels.go) | 0
-rw-r--r--  test/e2e/run_working_dir_test.go (renamed from test/e2e/run_working_dir.go) | 0
-rw-r--r--  test/e2e/search_test.go | 4
-rw-r--r--  test/python/docker/test_containers.py | 10
-rw-r--r--  troubleshooting.md | 2
48 files changed, 635 insertions, 348 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 4156e3082..5f99b0490 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -362,7 +362,8 @@ static_alt_build_task:
# this cache ensures only the static podman binary is built.
nix_cache:
folder: '/var/cache/nix'
- fingerprint_script: cat nix/*
+ # Cirrus will calculate/use sha of this output as the cache key
+ fingerprint_script: echo "${IMAGE_SUFFIX}" && cat nix/*
setup_script: *setup
main_script: *main
always: *binary_artifacts
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1d2c26750..30fddf82b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -286,16 +286,7 @@ commit automatically with `git commit -s`.
### Go Format and lint
-All code changes must pass ``make validate`` and ``make lint``, as
-executed in a standard container. The container image for this
-purpose is provided at: ``quay.io/libpod/gate:master``. With
-other tags available for different branches as needed. These
-images are built automatically after merges to the branch.
-
-#### Building the gate container locally
-
-For local use, debugging, or experimentation, the gate image may
-be built locally from the repository root, with the command:
+All code changes must pass ``make validate`` and ``make lint``.
```
podman build -t gate -f contrib/gate/Dockerfile .
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index a9e2d2e35..642feb5e0 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -98,14 +98,6 @@ func checkFlags(c *cobra.Command) error {
if listOpts.Last >= 0 && listOpts.Latest {
return errors.Errorf("last and latest are mutually exclusive")
}
- // Filter on status forces all
- for _, filter := range filters {
- splitFilter := strings.SplitN(filter, "=", 2)
- if strings.ToLower(splitFilter[0]) == "status" {
- listOpts.All = true
- break
- }
- }
// Quiet conflicts with size and namespace and is overridden by a Go
// template.
if listOpts.Quiet {
diff --git a/cmd/podman/inspect/inspect.go b/cmd/podman/inspect/inspect.go
index f9bd75c93..13f36ebbd 100644
--- a/cmd/podman/inspect/inspect.go
+++ b/cmd/podman/inspect/inspect.go
@@ -2,6 +2,7 @@ package inspect
import (
"context"
+ "encoding/json" // due to a bug in json-iterator it cannot be used here
"fmt"
"os"
"regexp"
@@ -28,17 +29,14 @@ const (
ContainerType = "container"
// ImageType is the image type.
ImageType = "image"
- //NetworkType is the network type
+ // NetworkType is the network type
NetworkType = "network"
- //PodType is the pod type.
+ // PodType is the pod type.
PodType = "pod"
- //VolumeType is the volume type
+ // VolumeType is the volume type
VolumeType = "volume"
)
-// Pull in configured json library
-var json = registry.JSONLibrary()
-
// AddInspectFlagSet takes a command and adds the inspect flags and returns an
// InspectOptions object.
func AddInspectFlagSet(cmd *cobra.Command) *entities.InspectOptions {
@@ -173,7 +171,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
data = append(data, podData)
}
}
- if i.podOptions.Latest { //latest means there are no names in the namesOrID array
+ if i.podOptions.Latest { // latest means there are no names in the namesOrID array
podData, err := i.containerEngine.PodInspect(ctx, i.podOptions)
if err != nil {
cause := errors.Cause(err)
@@ -238,9 +236,12 @@ func (i *inspector) inspect(namesOrIDs []string) error {
}
func printJSON(data []interface{}) error {
- enc := json.NewEncoder(os.Stdout)
- enc.SetIndent("", " ")
- return enc.Encode(data)
+ buf, err := json.MarshalIndent(data, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Println(string(buf))
+ return err
}
func printTmpl(typ, row string, data []interface{}) error {
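As the new import comment notes, the configured json-iterator library could not be used for this output, so printJSON now goes through the standard library. A minimal, self-contained sketch of the resulting approach (sample data and indent width are illustrative); note that json.Encoder.Encode would also append its own trailing newline, whereas MarshalIndent plus fmt.Println keeps that explicit:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// printJSON mirrors the new approach: marshal once with MarshalIndent and
// print the result, instead of streaming through an Encoder.
func printJSON(data []interface{}) error {
	buf, err := json.MarshalIndent(data, "", "    ") // indent width is illustrative
	if err != nil {
		return err
	}
	_, err = fmt.Println(string(buf))
	return err
}

func main() {
	sample := []interface{}{
		map[string]interface{}{"Id": "abc123", "State": map[string]string{"Status": "running"}},
	}
	if err := printJSON(sample); err != nil {
		fmt.Println("error:", err)
	}
}
```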
diff --git a/cmd/podman/manifest/manifest.go b/cmd/podman/manifest/manifest.go
index c3bcdc8c7..990ad0e95 100644
--- a/cmd/podman/manifest/manifest.go
+++ b/cmd/podman/manifest/manifest.go
@@ -18,7 +18,7 @@ var (
podman manifest create localhost/list
podman manifest inspect localhost/list
podman manifest annotate --annotation left=right mylist:v1.11 image:v1.11-amd64
- podman manifest push mylist:v1.11 quay.io/myimagelist
+ podman manifest push mylist:v1.11 docker://quay.io/myuser/image:v1.11
podman manifest remove mylist:v1.11 sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736`,
}
)
diff --git a/cmd/podman/manifest/push.go b/cmd/podman/manifest/push.go
index 593d62710..a3b469491 100644
--- a/cmd/podman/manifest/push.go
+++ b/cmd/podman/manifest/push.go
@@ -28,7 +28,7 @@ var (
Short: "Push a manifest list or image index to a registry",
Long: "Pushes manifest lists and image indexes to registries.",
RunE: push,
- Example: `podman manifest push mylist:v1.11 quay.io/myimagelist`,
+ Example: `podman manifest push mylist:v1.11 docker://quay.io/myuser/image:v1.11`,
Args: cobra.ExactArgs(2),
ValidArgsFunction: common.AutocompleteImages,
}
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index bab6b45ea..f2a5a431a 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -87,8 +87,11 @@ func networkList(cmd *cobra.Command, args []string) error {
nlprs = append(nlprs, ListPrintReports{r})
}
+ // Headers() gets lost resolving the embedded field names so add them
headers := report.Headers(ListPrintReports{}, map[string]string{
+ "Name": "name",
"CNIVersion": "version",
+ "Version": "version",
"Plugins": "plugins",
})
renderHeaders := true
@@ -110,7 +113,6 @@ func networkList(cmd *cobra.Command, args []string) error {
if err := tmpl.Execute(w, headers); err != nil {
return err
}
-
}
return tmpl.Execute(w, nlprs)
}
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index a9e91bd68..db70ad7d4 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -22,6 +22,7 @@ type playKubeOptionsWrapper struct {
TLSVerifyCLI bool
CredentialsCLI string
+ StartCLI bool
}
var (
@@ -68,6 +69,7 @@ func init() {
flags.BoolVarP(&kubeOptions.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
flags.BoolVar(&kubeOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
+ flags.BoolVar(&kubeOptions.StartCLI, "start", true, "Start the pod after creating it")
authfileFlagName := "authfile"
flags.StringVar(&kubeOptions.Authfile, authfileFlagName, auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
@@ -100,6 +102,9 @@ func kube(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("tls-verify") {
kubeOptions.SkipTLSVerify = types.NewOptionalBool(!kubeOptions.TLSVerifyCLI)
}
+ if cmd.Flags().Changed("start") {
+ kubeOptions.Start = types.NewOptionalBool(kubeOptions.StartCLI)
+ }
if kubeOptions.Authfile != "" {
if _, err := os.Stat(kubeOptions.Authfile); err != nil {
return err
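The new --start flag follows the tri-state pattern used elsewhere in this file: the plain bool bound to the flag is only promoted to an OptionalBool when the user actually set it, so the backend can distinguish an explicit --start=false from the default. A self-contained sketch of that pattern, not the actual command wiring:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/spf13/cobra"
)

func main() {
	var (
		startCLI bool               // plain bool bound to the flag
		start    types.OptionalBool // tri-state value passed to the backend
	)

	cmd := &cobra.Command{
		Use: "kube",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Only promote the value if the user actually set the flag.
			if cmd.Flags().Changed("start") {
				start = types.NewOptionalBool(startCLI)
			}
			switch start {
			case types.OptionalBoolUndefined:
				fmt.Println("--start not given: backend applies its default (start the pod)")
			case types.OptionalBoolTrue:
				fmt.Println("--start=true requested explicitly")
			case types.OptionalBoolFalse:
				fmt.Println("--start=false: create the pod but do not start it")
			}
			return nil
		},
	}
	cmd.Flags().BoolVar(&startCLI, "start", true, "Start the pod after creating it")
	_ = cmd.Execute()
}
```

Running the sketch with `--start=false` exercises the explicit-false branch; running it with no flag leaves the option undefined.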
diff --git a/cmd/podman/system/service_abi.go b/cmd/podman/system/service_abi.go
index 84f9293d4..8c52616be 100644
--- a/cmd/podman/system/service_abi.go
+++ b/cmd/podman/system/service_abi.go
@@ -5,12 +5,8 @@ package system
import (
"context"
"net"
- "os"
- "os/signal"
"strings"
- "github.com/containers/podman/v2/cmd/podman/utils"
- "github.com/containers/podman/v2/libpod"
api "github.com/containers/podman/v2/pkg/api/server"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/infra"
@@ -43,7 +39,7 @@ func restService(opts entities.ServiceOptions, flags *pflag.FlagSet, cfg *entiti
return err
}
- startWatcher(rt)
+ infra.StartWatcher(rt)
server, err := api.NewServerWithSettings(rt, opts.Timeout, listener)
if err != nil {
return err
@@ -60,24 +56,3 @@ func restService(opts entities.ServiceOptions, flags *pflag.FlagSet, cfg *entiti
}
return err
}
-
-// startWatcher starts a new SIGHUP go routine for the current config.
-func startWatcher(rt *libpod.Runtime) {
- // Setup the signal notifier
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, utils.SIGHUP)
-
- go func() {
- for {
- // Block until the signal is received
- logrus.Debugf("waiting for SIGHUP to reload configuration")
- <-ch
- if err := rt.Reload(); err != nil {
- logrus.Errorf("unable to reload configuration: %v", err)
- continue
- }
- }
- }()
-
- logrus.Debugf("registered SIGHUP watcher for config")
-}
diff --git a/contrib/gate/Dockerfile b/contrib/gate/Dockerfile
deleted file mode 100644
index 0a4d57416..000000000
--- a/contrib/gate/Dockerfile
+++ /dev/null
@@ -1,41 +0,0 @@
-FROM fedora:32
-
-ENV GOPATH="/var/tmp/go" \
- GOBIN="/var/tmp/go/bin" \
- PATH="/var/tmp/go/bin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" \
- SRCPATH="/usr/src/libpod" \
- GOSRC="/var/tmp/go/src/github.com/containers/podman"
-
-# Only needed for installing build-time dependencies, then will be removed
-COPY . $GOSRC
-
-# Install packages from dependencies.txt, ignoring commented lines
-# Note: adding conmon and crun so podman command checks will work
-RUN dnf -y install \
- $(grep "^[^#]" $GOSRC/contrib/dependencies.txt) diffutils containers-common fuse-overlayfs conmon crun runc --exclude container-selinux; \
- sed -i -e 's|^#mount_program|mount_program|g' -e 's/# size.*/skip_mount_home = "true"/g' /etc/containers/storage.conf \
- && dnf clean all
-
-# Install dependencies
-RUN set -x && \
- mkdir -p "$GOBIN" && \
- mkdir -p /etc/cni/net.d && \
- mkdir -p /etc/containers && \
- install -D -m 755 $GOSRC/contrib/gate/entrypoint.sh /usr/local/bin/ && \
- python3 -m pip install pre-commit
-
-# Install cni config
-COPY cni/87-podman-bridge.conflist /etc/cni/net.d/87-podman-bridge.conflist
-# Make sure we have some policy for pulling images
-COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml
-
-WORKDIR "$GOSRC"
-RUN make install.tools && \
- cd / && \
- rm -rf "$GOSRC" && \
- mkdir -p "$GOSRC"
-VOLUME ["/usr/src/libpod"]
-# This entrypoint will synchronize the above volume ($SRCPATH) to $GOSRC before
-# executing make. This ensures the original source remains prestine and is never
-# modified by any lint/validation checks.
-ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/contrib/gate/README.md b/contrib/gate/README.md
deleted file mode 100644
index 6c33e1d74..000000000
--- a/contrib/gate/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-![PODMAN logo](../../logo/podman-logo-source.svg)
-
-The "gate" image is a standard container image for lint-checking and validating
-changes to the libpod repository. It must be built from the repository root as
-[described in the contibutors guide](https://github.com/containers/podman/blob/master/CONTRIBUTING.md#go-format-and-lint).
-The image is also used in [CI/CD automation](../../.cirrus.yml).
diff --git a/contrib/gate/entrypoint.sh b/contrib/gate/entrypoint.sh
deleted file mode 100755
index 102d012e5..000000000
--- a/contrib/gate/entrypoint.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-die() {
- echo "${2:-FATAL ERROR (but no message given!)} (gate container entrypoint)"
- exit ${1:-1}
-}
-
-[[ -n "$SRCPATH" ]] || die 1 "ERROR: \$SRCPATH must be non-empty"
-[[ -n "$GOPATH" ]] || die 2 "ERROR: \$GOPATH must be non-empty"
-[[ -n "$GOSRC" ]] || die 3 "ERROR: \$GOSRC must be non-empty"
-[[ -r "${SRCPATH}/contrib/gate/Dockerfile" ]] || \
- die 4 "ERROR: Expecting libpod repository root at $SRCPATH"
-
-# Working from a copy avoids needing to perturb the actual source files
-# if/when developers use gate container for local testing
-echo "Copying $SRCPATH to $GOSRC"
-mkdir -vp "$GOSRC"
-/usr/bin/rsync --recursive --links --quiet --safe-links \
- --perms --times --delete "${SRCPATH}/" "${GOSRC}/"
-cd "$GOSRC"
-exec make "$@"
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index e14d1ed79..67584ffcc 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -58,6 +58,10 @@ Suppress output information when pulling images
Directory path for seccomp profiles (default: "/var/lib/kubelet/seccomp"). (Not available for remote commands)
+#### **--start**=*true|false*
+
+Start the pod after creating it, set to false to only create it.
+
#### **--tls-verify**=*true|false*
Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
diff --git a/docs/tutorials/image_signing.md b/docs/tutorials/image_signing.md
index f0adca9af..0d1d63de2 100644
--- a/docs/tutorials/image_signing.md
+++ b/docs/tutorials/image_signing.md
@@ -34,7 +34,7 @@ Now let’s assume that we run a container registry. For example we could simply
start one on our local machine:
```bash
-> sudo podman run -d -p 5000:5000 docker.io/registry
+sudo podman run -d -p 5000:5000 docker.io/registry
```
The registry does not know anything about image signing, it just provides the remote
@@ -44,11 +44,11 @@ have to take care of how to distribute the signatures.
Let’s choose a standard `alpine` image for our signing experiment:
```bash
-> sudo podman pull docker://docker.io/alpine:latest
+sudo podman pull docker://docker.io/alpine:latest
```
```bash
-> sudo podman images alpine
+sudo podman images alpine
REPOSITORY TAG IMAGE ID CREATED SIZE
docker.io/library/alpine latest e7d92cdc71fe 6 weeks ago 5.86 MB
```
@@ -56,11 +56,11 @@ docker.io/library/alpine latest e7d92cdc71fe 6 weeks ago 5.86 MB
Now we can re-tag the image to point it to our local registry:
```bash
-> sudo podman tag alpine localhost:5000/alpine
+sudo podman tag alpine localhost:5000/alpine
```
```bash
-> sudo podman images alpine
+sudo podman images alpine
REPOSITORY TAG IMAGE ID CREATED SIZE
localhost:5000/alpine latest e7d92cdc71fe 6 weeks ago 5.86 MB
docker.io/library/alpine latest e7d92cdc71fe 6 weeks ago 5.86 MB
@@ -84,7 +84,7 @@ We can see that we have two signature stores configured:
Now, let’s push and sign the image:
```bash
-> sudo -E GNUPGHOME=$HOME/.gnupg \
+sudo -E GNUPGHOME=$HOME/.gnupg \
podman push \
--tls-verify=false \
--sign-by sgrunert@suse.com \
@@ -97,7 +97,7 @@ If we now take a look at the systems signature storage, then we see that there
is a new signature available, which was caused by the image push:
```bash
-> sudo ls /var/lib/containers/sigstore
+sudo ls /var/lib/containers/sigstore
'alpine@sha256=e9b65ef660a3ff91d28cc50eba84f21798a6c5c39b4dd165047db49e84ae1fb9'
```
@@ -107,14 +107,14 @@ The default signature store in our edited version of
the local staging signature store:
```bash
-> sudo bash -c 'cd /var/lib/containers/sigstore && python3 -m http.server'
+sudo bash -c 'cd /var/lib/containers/sigstore && python3 -m http.server'
Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ...
```
Let’s remove the local images for our verification test:
```
-> sudo podman rmi docker.io/alpine localhost:5000/alpine
+sudo podman rmi docker.io/alpine localhost:5000/alpine
```
We have to write a policy to enforce that the signature has to be valid. This
@@ -142,13 +142,13 @@ below example, copy the `"docker"` entry into the `"transports"` section of your
The `keyPath` does not exist yet, so we have to put the GPG key there:
```bash
-> gpg --output /tmp/key.gpg --armor --export sgrunert@suse.com
+gpg --output /tmp/key.gpg --armor --export sgrunert@suse.com
```
If we now pull the image:
```bash
-> sudo podman pull --tls-verify=false localhost:5000/alpine
+sudo podman pull --tls-verify=false localhost:5000/alpine
Storing signatures
e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a
@@ -164,14 +164,14 @@ accessed:
As an counterpart example, if we specify the wrong key at `/tmp/key.gpg`:
```bash
-> gpg --output /tmp/key.gpg --armor --export mail@saschagrunert.de
+gpg --output /tmp/key.gpg --armor --export mail@saschagrunert.de
File '/tmp/key.gpg' exists. Overwrite? (y/N) y
```
Then a pull is not possible any more:
```bash
-> sudo podman pull --tls-verify=false localhost:5000/alpine
+sudo podman pull --tls-verify=false localhost:5000/alpine
Trying to pull localhost:5000/alpine...
Error: error pulling image "localhost:5000/alpine": unable to pull localhost:5000/alpine: unable to pull image: Source image rejected: Invalid GPG signature: …
```
diff --git a/docs/tutorials/mac_win_client.md b/docs/tutorials/mac_win_client.md
index 9e0798bbf..af2668e10 100644
--- a/docs/tutorials/mac_win_client.md
+++ b/docs/tutorials/mac_win_client.md
@@ -36,7 +36,7 @@ $ systemctl --user enable --now podman.socket
You will need to enable linger for this user in order for the socket to work when the user is not logged in.
```
-$ sudo loginctl enable-linger $USER
+sudo loginctl enable-linger $USER
```
You can verify that the socket is listening with a simple Podman command.
@@ -55,7 +55,7 @@ host:
In order for the client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
```
-$ sudo systemctl enable -s sshd
+sudo systemctl enable --now -s sshd
```
#### Setting up SSH
diff --git a/docs/tutorials/podman_tutorial.md b/docs/tutorials/podman_tutorial.md
index 85b95af04..c15de67a6 100644
--- a/docs/tutorials/podman_tutorial.md
+++ b/docs/tutorials/podman_tutorial.md
@@ -41,7 +41,7 @@ Note: If you add *-a* to the *ps* command, Podman will show all containers.
You can "inspect" a running container for metadata and details about itself. We can even use
the inspect subcommand to see what IP address was assigned to the container. As the container is running in rootless mode, an IP address is not assigned and the value will be listed as "none" in the output from inspect.
```console
-$ podman inspect -l | grep IPAddress\":
+podman inspect -l | grep IPAddress\":
"SecondaryIPAddresses": null,
"IPAddress": "",
```
@@ -60,7 +60,7 @@ curl http://<IP_address>:8080
### Viewing the container's logs
You can view the container's logs with Podman as well:
```console
-$ sudo podman logs --latest
+podman logs --latest
10.88.0.1 - - [07/Feb/2018:15:22:11 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.55.1" "-"
10.88.0.1 - - [07/Feb/2018:15:22:30 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.55.1" "-"
10.88.0.1 - - [07/Feb/2018:15:22:30 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.55.1" "-"
@@ -71,7 +71,7 @@ $ sudo podman logs --latest
### Viewing the container's pids
And you can observe the httpd pid in the container with *top*.
```console
-$ sudo podman top <container_id>
+podman top <container_id>
UID PID PPID C STIME TTY TIME CMD
0 31873 31863 0 09:21 ? 00:00:00 nginx: master process nginx -g daemon off;
101 31889 31873 0 09:21 ? 00:00:00 nginx: worker process
@@ -81,6 +81,8 @@ $ sudo podman top <container_id>
Checkpointing a container stops the container while writing the state of all processes in the container to disk.
With this a container can later be restored and continue running at exactly the same point in time as the
checkpoint. This capability requires CRIU 3.11 or later installed on the system.
+This feature is not supported as rootless; as such, if you wish to try it, you'll need to re-create your container as root, using the same command but with sudo.
+
To checkpoint the container use:
```console
sudo podman container checkpoint <container_id>
@@ -124,18 +126,18 @@ curl http://<IP_address>:8080
### Stopping the container
To stop the httpd container:
```console
-sudo podman stop --latest
+podman stop --latest
```
You can also check the status of one or more containers using the *ps* subcommand. In this case, we should
use the *-a* argument to list all containers.
```console
-sudo podman ps -a
+podman ps -a
```
### Removing the container
To remove the httpd container:
```console
-sudo podman rm --latest
+podman rm --latest
```
You can verify the deletion of the container by running *podman ps -a*.
diff --git a/docs/tutorials/remote_client.md b/docs/tutorials/remote_client.md
index ad506d19a..e39d804a6 100644
--- a/docs/tutorials/remote_client.md
+++ b/docs/tutorials/remote_client.md
@@ -29,19 +29,19 @@ You will need to [install Podman](https://podman.io/getting-started/installation
Before performing any Podman client commands, you must enable the podman.sock SystemD service on the Linux server. In these examples, we are running Podman as a normal, unprivileged user, also known as a rootless user. By default, the rootless socket listens at `/run/user/${UID}/podman/podman.sock`. You can enable this socket permanently using the following command:
```
-$ systemctl --user enable podman.socket
+systemctl --user enable --now podman.socket
```
You will need to enable linger for this user in order for the socket to work when the user is not logged in:
```
-$ sudo loginctl enable-linger $USER
+sudo loginctl enable-linger $USER
```
This is only required if you are not running Podman as root.
You can verify that the socket is listening with a simple Podman command.
```
-$ podman --remote info
+podman --remote info
host:
arch: amd64
buildahVersion: 1.16.0-dev
@@ -54,13 +54,13 @@ host:
In order for the Podman client to communicate with the server you need to enable and start the SSH daemon on your Linux machine, if it is not currently enabled.
```
-$ sudo systemctl enable -s sshd
+sudo systemctl enable --now -s sshd
```
#### Setting up SSH
Remote Podman uses SSH to communicate between the client and server. The remote client works considerably smoother using SSH keys. To set up your ssh connection, you need to generate an ssh key pair from your client machine.
```
-$ ssh-keygen
+ssh-keygen
```
Your public key by default should be in your home directory under ~/.ssh/id_rsa.pub. You then need to copy the contents of id_rsa.pub and append it into ~/.ssh/authorized_keys on the Linux server. You can automate this using ssh-copy-id.
@@ -75,13 +75,13 @@ The first step in using the Podman remote client is to configure a connection.
You can add a connection by using the `podman-remote system connection add` command.
```
-$ podman-remote system connection add myuser --identity ~/.ssh/id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock
+podman-remote system connection add myuser --identity ~/.ssh/id_rsa ssh://192.168.122.1/run/user/1000/podman/podman.sock
```
This will add a remote connection to Podman and if it is the first connection added, it will mark the connection as the default. You can observe your connections with `podman-remote system connection list`:
```
-$ podman-remote system connection list
+podman-remote system connection list
Name Identity URI
myuser* id_rsa ssh://myuser@192.168.122.1/run/user/1000/podman/podman.sock
```
@@ -89,7 +89,7 @@ myuser* id_rsa ssh://myuser@192.168.122.1/run/user/1000/podman/podman.s
Now we can test the connection with `podman info`:
```
-$ podman-remote info
+podman-remote info
host:
arch: amd64
buildahVersion: 1.16.0-dev
@@ -101,7 +101,7 @@ host:
Podman-remote has also introduced a “--connection” flag where you can use other connections you have defined. If no connection is provided, the default connection will be used.
```
-$ podman-remote system connection --help
+podman-remote system connection --help
```
## Wrap up
diff --git a/docs/tutorials/rootless_tutorial.md b/docs/tutorials/rootless_tutorial.md
index 3b9cbd2d0..9d8851bc8 100644
--- a/docs/tutorials/rootless_tutorial.md
+++ b/docs/tutorials/rootless_tutorial.md
@@ -6,14 +6,14 @@ Prior to allowing users without root privileges to run Podman, the administrator
## cgroup V2 support
-The cgroup V2 Linux kernel feature allows the user to limit the amount of resources a rootless container can use. If the Linux distribution that you are running Podman on is enabled with cgroup V2 then you might need to change the default OCI Runtime. The default runtime `runc` does not currently work with cgroup V2 enabled systems, so you have to switch to the alternative OCI runtime `crun`.
+The cgroup V2 Linux kernel feature allows the user to limit the amount of resources a rootless container can use. If the Linux distribution that you are running Podman on is enabled with cgroup V2 then you might need to change the default OCI Runtime. Some older versions of `runc` do not work with cgroup V2, you might have to switch to the alternative OCI runtime `crun`.
-The alternative OCI runtime support for cgroup V2 can be turned on at the command line by using the `--runtime` option:
+The alternative OCI runtime support for cgroup V2 can also be turned on at the command line by using the `--runtime` option:
```
-sudo podman --runtime /usr/bin/crun
+podman --runtime crun
```
-or by changing the value for the "Default OCI runtime" in the containers.conf file either at the system level or at the [user level](#user-configuration-files) from `runtime = "runc"` to `runtime = "crun"`.
+or for all commands by changing the value for the "Default OCI runtime" in the containers.conf file either at the system level or at the [user level](#user-configuration-files) from `runtime = "runc"` to `runtime = "crun"`.
## Administrator Actions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index be0adfe6a..dcb2ff751 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1296,10 +1296,6 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
}
ctrAliasesBkt := dbCtr.Bucket(aliasesBkt)
- if ctrAliasesBkt == nil {
- return errors.Wrapf(define.ErrNoAliases, "container %s has no network aliases", ctr.ID())
- }
-
ctrNetworksBkt := dbCtr.Bucket(networksBkt)
if ctrNetworksBkt == nil {
return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not connected to any CNI networks, so cannot disconnect", ctr.ID())
@@ -1313,13 +1309,15 @@ func (s *BoltState) NetworkDisconnect(ctr *Container, network string) error {
return errors.Wrapf(err, "error removing container %s from network %s", ctr.ID(), network)
}
- bktExists := ctrAliasesBkt.Bucket([]byte(network))
- if bktExists == nil {
- return nil
- }
+ if ctrAliasesBkt != nil {
+ bktExists := ctrAliasesBkt.Bucket([]byte(network))
+ if bktExists == nil {
+ return nil
+ }
- if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil {
- return errors.Wrapf(err, "error removing container %s network aliases for network %s", ctr.ID(), network)
+ if err := ctrAliasesBkt.DeleteBucket([]byte(network)); err != nil {
+ return errors.Wrapf(err, "error removing container %s network aliases for network %s", ctr.ID(), network)
+ }
}
return nil
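The NetworkDisconnect change stops treating a missing aliases bucket as an error and only deletes per-network aliases when the bucket is present. A hedged sketch of that nil-check pattern, written directly against go.etcd.io/bbolt (bucket and file names are illustrative, not libpod's schema):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		ctr, err := tx.CreateBucketIfNotExists([]byte("container1"))
		if err != nil {
			return err
		}
		// The aliases bucket may legitimately be missing (e.g. containers
		// created before aliases existed), so only touch it when present.
		if aliases := ctr.Bucket([]byte("aliases")); aliases != nil {
			if aliases.Bucket([]byte("mynet")) != nil {
				if err := aliases.DeleteBucket([]byte("mynet")); err != nil {
					return err
				}
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```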
diff --git a/libpod/container.go b/libpod/container.go
index 333e1d848..9009a4ec8 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1088,3 +1088,17 @@ func (c *Container) networks() ([]string, error) {
return networks, err
}
+
+// networksByNameIndex provides us with a map of container networks where key
+// is network name and value is the index position
+func (c *Container) networksByNameIndex() (map[string]int, error) {
+ networks, err := c.networks()
+ if err != nil {
+ return nil, err
+ }
+ networkNamesByIndex := make(map[string]int, len(networks))
+ for index, name := range networks {
+ networkNamesByIndex[name] = index
+ }
+ return networkNamesByIndex, nil
+}
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index a4115eb92..2fa91166f 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -67,7 +67,7 @@ type InspectPodInfraConfig struct {
StaticIP net.IP
// StaticMAC is a static MAC address that will be assigned to the infra
// container and then used by the pod.
- StaticMAC net.HardwareAddr
+ StaticMAC string
// NoManageResolvConf indicates that the pod will not manage resolv.conf
// and instead each container will handle their own.
NoManageResolvConf bool
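Switching StaticMAC from net.HardwareAddr to string (with pod_api.go calling .String(), further down) also changes how the field renders in JSON: HardwareAddr is a []byte, which encoding/json emits as base64. A quick standalone demonstration of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	mac, err := net.ParseMAC("92:d0:c6:0a:29:33")
	if err != nil {
		panic(err)
	}

	raw, _ := json.Marshal(map[string]interface{}{"StaticMAC": mac})
	fmt.Println(string(raw)) // {"StaticMAC":"ktDGCikz"}  (base64 of the raw bytes)

	str, _ := json.Marshal(map[string]interface{}{"StaticMAC": mac.String()})
	fmt.Println(string(str)) // {"StaticMAC":"92:d0:c6:0a:29:33"}
}
```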
diff --git a/libpod/filters/containers.go b/libpod/filters/containers.go
index da1b5b263..2520c4f30 100644
--- a/libpod/filters/containers.go
+++ b/libpod/filters/containers.go
@@ -1,7 +1,6 @@
package lpfilters
import (
- "regexp"
"strconv"
"strings"
"time"
@@ -11,101 +10,133 @@ import (
"github.com/containers/podman/v2/pkg/timetype"
"github.com/containers/podman/v2/pkg/util"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
// GenerateContainerFilterFuncs return ContainerFilter functions based of filter.
-func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
+func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpod.Runtime) (func(container *libpod.Container) bool, error) {
switch filter {
case "id":
+ // we only have to match one ID
return func(c *libpod.Container) bool {
- return strings.Contains(c.ID(), filterValue)
+ return util.StringMatchRegexSlice(c.ID(), filterValues)
}, nil
case "label":
- var filterArray = strings.SplitN(filterValue, "=", 2)
- var filterKey = filterArray[0]
- if len(filterArray) > 1 {
- filterValue = filterArray[1]
- } else {
- filterValue = ""
- }
+ // we have to match that all given labels exits on that container
return func(c *libpod.Container) bool {
- for labelKey, labelValue := range c.Labels() {
- if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
- return true
+ labels := c.Labels()
+ for _, filterValue := range filterValues {
+ matched := false
+ filterArray := strings.SplitN(filterValue, "=", 2)
+ filterKey := filterArray[0]
+ if len(filterArray) > 1 {
+ filterValue = filterArray[1]
+ } else {
+ filterValue = ""
+ }
+ for labelKey, labelValue := range labels {
+ if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ return false
}
}
- return false
+ return true
}, nil
case "name":
+ // we only have to match one name
return func(c *libpod.Container) bool {
- match, err := regexp.MatchString(filterValue, c.Name())
- if err != nil {
- logrus.Errorf("Failed to compile regex for 'name' filter: %v", err)
- return false
- }
- return match
+ return util.StringMatchRegexSlice(c.Name(), filterValues)
}, nil
case "exited":
- exitCode, err := strconv.ParseInt(filterValue, 10, 32)
- if err != nil {
- return nil, errors.Wrapf(err, "exited code out of range %q", filterValue)
+ var exitCodes []int32
+ for _, exitCode := range filterValues {
+ ec, err := strconv.ParseInt(exitCode, 10, 32)
+ if err != nil {
+ return nil, errors.Wrapf(err, "exited code out of range %q", ec)
+ }
+ exitCodes = append(exitCodes, int32(ec))
}
return func(c *libpod.Container) bool {
ec, exited, err := c.ExitCode()
- if ec == int32(exitCode) && err == nil && exited {
- return true
+ if err == nil && exited {
+ for _, exitCode := range exitCodes {
+ if ec == exitCode {
+ return true
+ }
+ }
}
return false
}, nil
case "status":
- if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
- return nil, errors.Errorf("%s is not a valid status", filterValue)
+ for _, filterValue := range filterValues {
+ if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) {
+ return nil, errors.Errorf("%s is not a valid status", filterValue)
+ }
}
return func(c *libpod.Container) bool {
status, err := c.State()
if err != nil {
return false
}
- if filterValue == "stopped" {
- filterValue = "exited"
- }
state := status.String()
if status == define.ContainerStateConfigured {
state = "created"
} else if status == define.ContainerStateStopped {
state = "exited"
}
- return state == filterValue
+ for _, filterValue := range filterValues {
+ if filterValue == "stopped" {
+ filterValue = "exited"
+ }
+ if state == filterValue {
+ return true
+ }
+ }
+ return false
}, nil
case "ancestor":
// This needs to refine to match docker
// - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant.
return func(c *libpod.Container) bool {
- containerConfig := c.Config()
- if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
- return true
+ for _, filterValue := range filterValues {
+ containerConfig := c.Config()
+ if strings.Contains(containerConfig.RootfsImageID, filterValue) || strings.Contains(containerConfig.RootfsImageName, filterValue) {
+ return true
+ }
}
return false
}, nil
case "before":
- ctr, err := r.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ var createTime time.Time
+ for _, filterValue := range filterValues {
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, err
+ }
+ containerConfig := ctr.Config()
+ if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
+ createTime = containerConfig.CreatedTime
+ }
}
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
return func(c *libpod.Container) bool {
cc := c.Config()
return createTime.After(cc.CreatedTime)
}, nil
case "since":
- ctr, err := r.LookupContainer(filterValue)
- if err != nil {
- return nil, errors.Errorf("unable to find container by name or id of %s", filterValue)
+ var createTime time.Time
+ for _, filterValue := range filterValues {
+ ctr, err := r.LookupContainer(filterValue)
+ if err != nil {
+ return nil, err
+ }
+ containerConfig := ctr.Config()
+ if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
+ createTime = containerConfig.CreatedTime
+ }
}
- containerConfig := ctr.Config()
- createTime := containerConfig.CreatedTime
return func(c *libpod.Container) bool {
cc := c.Config()
return createTime.Before(cc.CreatedTime)
@@ -115,17 +146,27 @@ func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
return func(c *libpod.Container) bool {
containerConfig := c.Config()
var dest string
- arr := strings.Split(filterValue, ":")
- source := arr[0]
- if len(arr) == 2 {
- dest = arr[1]
- }
- for _, mount := range containerConfig.Spec.Mounts {
- if dest != "" && (mount.Source == source && mount.Destination == dest) {
- return true
+ for _, filterValue := range filterValues {
+ arr := strings.SplitN(filterValue, ":", 2)
+ source := arr[0]
+ if len(arr) == 2 {
+ dest = arr[1]
}
- if dest == "" && mount.Source == source {
- return true
+ for _, mount := range containerConfig.Spec.Mounts {
+ if dest != "" && (mount.Source == source && mount.Destination == dest) {
+ return true
+ }
+ if dest == "" && mount.Source == source {
+ return true
+ }
+ }
+ for _, vname := range containerConfig.NamedVolumes {
+ if dest != "" && (vname.Name == source && vname.Dest == dest) {
+ return true
+ }
+ if dest == "" && vname.Name == source {
+ return true
+ }
}
}
return false
@@ -136,10 +177,18 @@ func GenerateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
if err != nil {
return false
}
- return hcStatus == filterValue
+ for _, filterValue := range filterValues {
+ if hcStatus == filterValue {
+ return true
+ }
+ }
+ return false
}, nil
case "until":
- ts, err := timetype.GetTimestamp(filterValue, time.Now())
+ if len(filterValues) != 1 {
+ return nil, errors.Errorf("specify exactly one timestamp for %s", filter)
+ }
+ ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
if err != nil {
return nil, err
}
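GenerateContainerFilterFuncs now receives the whole value slice for a filter key, so values for the same key are OR'ed inside a single predicate while different keys remain AND'ed by the callers that apply the predicates. A self-contained sketch of those semantics (the Container struct and field lookups are illustrative, not libpod's):

```go
package main

import (
	"fmt"
	"strings"
)

type Container struct {
	Name   string
	Status string
}

type filterFunc func(Container) bool

// generateFilter builds one predicate per key; any value match is enough.
func generateFilter(key string, values []string) filterFunc {
	return func(c Container) bool {
		field := c.Status
		if key == "name" {
			field = c.Name
		}
		for _, v := range values { // OR across values for the same key
			if strings.Contains(field, v) {
				return true
			}
		}
		return false
	}
}

// matches applies one predicate per key; all keys must match.
func matches(c Container, filters map[string][]string) bool {
	for key, values := range filters { // AND across keys
		if !generateFilter(key, values)(c) {
			return false
		}
	}
	return true
}

func main() {
	c := Container{Name: "web", Status: "running"}
	fmt.Println(matches(c, map[string][]string{
		"status": {"exited", "running"}, // either status is acceptable
		"name":   {"web"},
	})) // true
}
```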
diff --git a/libpod/network/create.go b/libpod/network/create.go
index c11904ecf..387f4fcd3 100644
--- a/libpod/network/create.go
+++ b/libpod/network/create.go
@@ -8,7 +8,7 @@ import (
"path/filepath"
"github.com/containernetworking/cni/pkg/version"
- "github.com/containers/podman/v2/libpod"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/rootless"
"github.com/containers/podman/v2/pkg/util"
@@ -16,25 +16,21 @@ import (
)
// Create the CNI network
-func Create(name string, options entities.NetworkCreateOptions, r *libpod.Runtime) (*entities.NetworkCreateReport, error) {
+func Create(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (*entities.NetworkCreateReport, error) {
var fileName string
if err := isSupportedDriver(options.Driver); err != nil {
return nil, err
}
- config, err := r.GetConfig()
- if err != nil {
- return nil, err
- }
// Acquire a lock for CNI
- l, err := acquireCNILock(filepath.Join(config.Engine.TmpDir, LockFileName))
+ l, err := acquireCNILock(filepath.Join(runtimeConfig.Engine.TmpDir, LockFileName))
if err != nil {
return nil, err
}
defer l.releaseCNILock()
if len(options.MacVLAN) > 0 {
- fileName, err = createMacVLAN(r, name, options)
+ fileName, err = createMacVLAN(name, options, runtimeConfig)
} else {
- fileName, err = createBridge(r, name, options)
+ fileName, err = createBridge(name, options, runtimeConfig)
}
if err != nil {
return nil, err
@@ -81,17 +77,17 @@ func validateBridgeOptions(options entities.NetworkCreateOptions) error {
}
// createBridge creates a CNI network
-func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) {
+func createBridge(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (string, error) {
+ var (
+ ipamRanges [][]IPAMLocalHostRangeConf
+ err error
+ routes []IPAMRoute
+ )
isGateway := true
ipMasq := true
- runtimeConfig, err := r.GetConfig()
- if err != nil {
- return "", err
- }
// validate options
- err = validateBridgeOptions(options)
- if err != nil {
+ if err := validateBridgeOptions(options); err != nil {
return "", err
}
@@ -102,8 +98,6 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
subnet := &options.Subnet
ipRange := &options.Range
gateway := options.Gateway
- var ipamRanges [][]IPAMLocalHostRangeConf
- var routes []IPAMRoute
if subnet.IP != nil {
// if network is provided, does it conflict with existing CNI or live networks
err = ValidateUserNetworkIsAvailable(runtimeConfig, subnet)
@@ -201,7 +195,7 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate
return cniPathName, err
}
-func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) {
+func createMacVLAN(name string, options entities.NetworkCreateOptions, runtimeConfig *config.Config) (string, error) {
var (
plugins []CNIPlugins
)
@@ -210,17 +204,12 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
return "", err
}
- config, err := r.GetConfig()
- if err != nil {
- return "", err
- }
-
// Make sure the host-device exists
if !util.StringInSlice(options.MacVLAN, liveNetNames) {
return "", errors.Errorf("failed to find network interface %q", options.MacVLAN)
}
if len(name) > 0 {
- netNames, err := GetNetworkNamesFromFileSystem(config)
+ netNames, err := GetNetworkNamesFromFileSystem(runtimeConfig)
if err != nil {
return "", err
}
@@ -228,7 +217,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
return "", errors.Errorf("the network name %s is already used", name)
}
} else {
- name, err = GetFreeDeviceName(config)
+ name, err = GetFreeDeviceName(runtimeConfig)
if err != nil {
return "", err
}
@@ -241,7 +230,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat
if err != nil {
return "", err
}
- cniPathName := filepath.Join(GetCNIConfDir(config), fmt.Sprintf("%s.conflist", name))
+ cniPathName := filepath.Join(GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name))
err = ioutil.WriteFile(cniPathName, b, 0644)
return cniPathName, err
}
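The create path no longer takes a *libpod.Runtime; callers resolve the *config.Config once and pass it down, which keeps the network package from needing the runtime. A small illustrative sketch of that refactor shape (type and function names are made up for the example, not the podman API):

```go
package main

import "fmt"

type Config struct {
	TmpDir string
}

type Runtime struct {
	cfg *Config
}

// Before: the helper reached back into the runtime for its configuration.
func createNetOld(r *Runtime, name string) (string, error) {
	cfg := r.cfg
	return fmt.Sprintf("%s/%s.conflist", cfg.TmpDir, name), nil
}

// After: the caller resolves the config once and passes only what is needed.
func createNetNew(name string, cfg *Config) (string, error) {
	return fmt.Sprintf("%s/%s.conflist", cfg.TmpDir, name), nil
}

func main() {
	cfg := &Config{TmpDir: "/run/user/1000"}

	old, _ := createNetOld(&Runtime{cfg: cfg}, "mynet")
	fresh, _ := createNetNew("mynet", cfg)
	fmt.Println(old, fresh) // same result, smaller dependency
}
```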
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index fed90cfc3..3882e095a 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -13,6 +13,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "sort"
"strings"
"syscall"
"time"
@@ -20,6 +21,7 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/libpod/network"
"github.com/containers/podman/v2/pkg/errorhandling"
"github.com/containers/podman/v2/pkg/netns"
"github.com/containers/podman/v2/pkg/rootless"
@@ -981,3 +983,139 @@ func (w *logrusDebugWriter) Write(p []byte) (int, error) {
logrus.Debugf("%s%s", w.prefix, string(p))
return len(p), nil
}
+
+// DisconnectContainerFromNetwork removes a container from its CNI network
+func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error {
+ ctr, err := r.LookupContainer(nameOrID)
+ if err != nil {
+ return err
+ }
+
+ networks, err := ctr.networksByNameIndex()
+ if err != nil {
+ return err
+ }
+
+ exists, err := network.Exists(r.config, netName)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return errors.Wrap(define.ErrNoSuchNetwork, netName)
+ }
+
+ index, nameExists := networks[netName]
+ if !nameExists && len(networks) > 0 {
+ return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
+ }
+
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+ if err := ctr.syncContainer(); err != nil {
+ return err
+ }
+
+ podConfig := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), []string{netName}, ctr.config.PortMappings, nil, nil)
+ if err := r.netPlugin.TearDownPod(podConfig); err != nil {
+ return err
+ }
+ if err := r.state.NetworkDisconnect(ctr, netName); err != nil {
+ return err
+ }
+
+ // update network status
+ networkStatus := ctr.state.NetworkStatus
+ // if len is one and we confirmed earlier that the container is in
+ // fact connected to the network, then just return an empty slice
+ if len(networkStatus) == 1 {
+ ctr.state.NetworkStatus = make([]*cnitypes.Result, 0)
+ } else {
+ // clip out the index of the network
+ networkStatus[len(networkStatus)-1], networkStatus[index] = networkStatus[index], networkStatus[len(networkStatus)-1]
+ // shorten the slice by one
+ ctr.state.NetworkStatus = networkStatus[:len(networkStatus)-1]
+ }
+ return nil
+}
+
+// ConnectContainerToNetwork connects a container to a CNI network
+func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, aliases []string) error {
+ ctr, err := r.LookupContainer(nameOrID)
+ if err != nil {
+ return err
+ }
+
+ networks, err := ctr.networksByNameIndex()
+ if err != nil {
+ return err
+ }
+
+ exists, err := network.Exists(r.config, netName)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return errors.Wrap(define.ErrNoSuchNetwork, netName)
+ }
+
+ _, nameExists := networks[netName]
+ if !nameExists && len(networks) > 0 {
+ return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
+ }
+
+ ctr.lock.Lock()
+ defer ctr.lock.Unlock()
+ if err := ctr.syncContainer(); err != nil {
+ return err
+ }
+
+ if err := r.state.NetworkConnect(ctr, netName, aliases); err != nil {
+ return err
+ }
+
+ podConfig := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), []string{netName}, ctr.config.PortMappings, nil, nil)
+ podConfig.Aliases = make(map[string][]string, 1)
+ podConfig.Aliases[netName] = aliases
+ results, err := r.netPlugin.SetUpPod(podConfig)
+ if err != nil {
+ return err
+ }
+ if len(results) != 1 {
+ return errors.New("when adding aliases, results must be of length 1")
+ }
+
+ networkResults := make([]*cnitypes.Result, 0)
+ for _, r := range results {
+ resultCurrent, err := cnitypes.GetResult(r.Result)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result, err)
+ }
+ networkResults = append(networkResults, resultCurrent)
+ }
+
+ // update network status
+ networkStatus := ctr.state.NetworkStatus
+ // if len is one and we confirmed earlier that the container is in
+ // fact connected to the network, then just return an empty slice
+ if len(networkStatus) == 0 {
+ ctr.state.NetworkStatus = append(ctr.state.NetworkStatus, networkResults...)
+ } else {
+ // build a list of network names so we can sort and
+ // get the new name's index
+ var networkNames []string
+ for netName := range networks {
+ networkNames = append(networkNames, netName)
+ }
+ networkNames = append(networkNames, netName)
+ // sort
+ sort.Strings(networkNames)
+ // get index of new network name
+ index := sort.SearchStrings(networkNames, netName)
+ // Append a zero value to to the slice
+ networkStatus = append(networkStatus, &cnitypes.Result{})
+ // populate network status
+ copy(networkStatus[index+1:], networkStatus[index:])
+ networkStatus[index] = networkResults[0]
+ }
+ return nil
+}
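ConnectContainerToNetwork keeps ctr.state.NetworkStatus aligned with the sorted network names by inserting the new result at its sorted index, and DisconnectContainerFromNetwork drops an entry with a swap-and-truncate. A self-contained sketch of both slice idioms (plain strings stand in for *cnitypes.Result):

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted mirrors the connect path: find the new name's sorted index,
// grow the status slice with a zero value, shift the tail right with copy,
// and drop the new status into the freed slot.
func insertSorted(names, statuses []string, newName, newStatus string) ([]string, []string) {
	names = append(names, newName)
	sort.Strings(names)
	index := sort.SearchStrings(names, newName)

	statuses = append(statuses, "")            // append a zero value
	copy(statuses[index+1:], statuses[index:]) // shift the tail right by one
	statuses[index] = newStatus                // populate the slot
	return names, statuses
}

// removeUnordered mirrors the disconnect path: swap the entry to the end and
// truncate (order of the remaining entries is not preserved).
func removeUnordered(statuses []string, index int) []string {
	statuses[len(statuses)-1], statuses[index] = statuses[index], statuses[len(statuses)-1]
	return statuses[:len(statuses)-1]
}

func main() {
	names := []string{"bridge", "podman"}
	statuses := []string{"result-bridge", "result-podman"}

	names, statuses = insertSorted(names, statuses, "mynet", "result-mynet")
	fmt.Println(names, statuses) // [bridge mynet podman] [result-bridge result-mynet result-podman]

	fmt.Println(removeUnordered(statuses, 0)) // [result-podman result-mynet]
}
```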
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index 87ac5c07a..845948dd3 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -535,7 +535,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
infraConfig = new(define.InspectPodInfraConfig)
infraConfig.HostNetwork = p.config.InfraContainer.HostNetwork
infraConfig.StaticIP = p.config.InfraContainer.StaticIP
- infraConfig.StaticMAC = p.config.InfraContainer.StaticMAC
+ infraConfig.StaticMAC = p.config.InfraContainer.StaticMAC.String()
infraConfig.NoManageResolvConf = p.config.InfraContainer.UseImageResolvConf
infraConfig.NoManageHosts = p.config.InfraContainer.UseImageHosts
diff --git a/pkg/api/handlers/compat/containers_prune.go b/pkg/api/handlers/compat/containers_prune.go
index 397feac9a..2cfeebcce 100644
--- a/pkg/api/handlers/compat/containers_prune.go
+++ b/pkg/api/handlers/compat/containers_prune.go
@@ -16,7 +16,6 @@ func PruneContainers(w http.ResponseWriter, r *http.Request) {
var (
delContainers []string
space int64
- filterFuncs []libpod.ContainerFilter
)
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
@@ -28,15 +27,14 @@ func PruneContainers(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
return
}
+ filterFuncs := make([]libpod.ContainerFilter, 0, len(query.Filters))
for k, v := range query.Filters {
- for _, val := range v {
- generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, val, runtime)
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- filterFuncs = append(filterFuncs, generatedFunc)
+ generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, v, runtime)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
}
+ filterFuncs = append(filterFuncs, generatedFunc)
}
// Libpod response differs
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index abbb6d2c0..64ddebf9c 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -312,48 +312,40 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
// Connect adds a container to a network
-// TODO: For now this func is a no-op that checks the container name, network name, and
-// responds with a 200. This allows the call to remain intact. We need to decide how
-// we make this work with CNI networking and setup/teardown.
func Connect(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
- var netConnect types.NetworkConnect
+ var (
+ aliases []string
+ netConnect types.NetworkConnect
+ )
if err := json.NewDecoder(r.Body).Decode(&netConnect); err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
return
}
- config, err := runtime.GetConfig()
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
name := utils.GetName(r)
- exists, err := network.Exists(config, name)
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- if !exists {
- utils.Error(w, "network not found", http.StatusNotFound, define.ErrNoSuchNetwork)
- return
+ if netConnect.EndpointConfig != nil {
+ if netConnect.EndpointConfig.Aliases != nil {
+ aliases = netConnect.EndpointConfig.Aliases
+ }
}
- if _, err = runtime.LookupContainer(netConnect.Container); err != nil {
+ err := runtime.ConnectContainerToNetwork(netConnect.Container, name, aliases)
+ if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
utils.ContainerNotFound(w, netConnect.Container, err)
return
}
- utils.Error(w, "unable to lookup container", http.StatusInternalServerError, err)
+ if errors.Cause(err) == define.ErrNoSuchNetwork {
+ utils.Error(w, "network not found", http.StatusNotFound, err)
+ return
+ }
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
return
}
- logrus.Warnf("network connect endpoint is not fully implemented - tried to connect container %s to network %s", netConnect.Container, name)
utils.WriteResponse(w, http.StatusOK, "OK")
}
// Disconnect removes a container from a network
-// TODO: For now this func is a no-op that checks the container name, network name, and
-// responds with a 200. This allows the call to remain intact. We need to decide how
-// we make this work with CNI networking and setup/teardown.
func Disconnect(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
@@ -362,29 +354,20 @@ func Disconnect(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
return
}
- config, err := runtime.GetConfig()
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
+
name := utils.GetName(r)
- exists, err := network.Exists(config, name)
+ err := runtime.DisconnectContainerFromNetwork(netDisconnect.Container, name, netDisconnect.Force)
if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- if !exists {
- utils.Error(w, "network not found", http.StatusNotFound, define.ErrNoSuchNetwork)
- return
- }
- if _, err = runtime.LookupContainer(netDisconnect.Container); err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
- utils.ContainerNotFound(w, netDisconnect.Container, err)
+ utils.Error(w, "container not found", http.StatusNotFound, err)
+ return
+ }
+ if errors.Cause(err) == define.ErrNoSuchNetwork {
+ utils.Error(w, "network not found", http.StatusNotFound, err)
return
}
- utils.Error(w, "unable to lookup container", http.StatusInternalServerError, err)
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
return
}
- logrus.Warnf("network disconnect endpoint is not fully implemented - tried to connect container %s to network %s", netDisconnect.Container, name)
utils.WriteResponse(w, http.StatusOK, "OK")
}
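With the lookup and existence checks folded into the runtime methods, the handlers now map the wrapped error back to an HTTP status via errors.Cause. A hedged sketch of that mapping pattern (local sentinel errors stand in for define.ErrNoSuchCtr and define.ErrNoSuchNetwork):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/pkg/errors"
)

var (
	errNoSuchCtr     = errors.New("no such container")
	errNoSuchNetwork = errors.New("network not found")
)

// statusFor unwraps the error chain and picks an HTTP status per sentinel.
func statusFor(err error) int {
	switch errors.Cause(err) {
	case errNoSuchCtr, errNoSuchNetwork:
		return http.StatusNotFound
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	wrapped := errors.Wrap(errNoSuchNetwork, "mynet")
	fmt.Println(statusFor(wrapped))                       // 404
	fmt.Println(statusFor(errors.New("something broke"))) // 500
}
```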
diff --git a/pkg/api/handlers/libpod/play.go b/pkg/api/handlers/libpod/play.go
index 0c7a6e19d..42ff26a57 100644
--- a/pkg/api/handlers/libpod/play.go
+++ b/pkg/api/handlers/libpod/play.go
@@ -23,8 +23,10 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
Network string `schema:"reference"`
TLSVerify bool `schema:"tlsVerify"`
LogDriver string `schema:"logDriver"`
+ Start bool `schema:"start"`
}{
TLSVerify: true,
+ Start: true,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
@@ -73,6 +75,9 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
}
+ if _, found := r.URL.Query()["start"]; found {
+ options.Start = types.NewOptionalBool(query.Start)
+ }
report, err := containerEngine.PlayKube(r.Context(), tmpfile.Name(), options)
if err != nil {
diff --git a/pkg/api/server/register_play.go b/pkg/api/server/register_play.go
index e41f8311d..6aa349a3b 100644
--- a/pkg/api/server/register_play.go
+++ b/pkg/api/server/register_play.go
@@ -29,6 +29,11 @@ func (s *APIServer) registerPlayHandlers(r *mux.Router) error {
// name: logDriver
// type: string
// description: Logging driver for the containers in the pod.
+ // - in: query
+ // name: start
+ // type: boolean
+ // default: true
+ // description: Start the pod after creating it.
// - in: body
// name: request
// description: Kubernetes YAML file.
diff --git a/pkg/bindings/play/play.go b/pkg/bindings/play/play.go
index 8af3b8fb1..cfb40d74b 100644
--- a/pkg/bindings/play/play.go
+++ b/pkg/bindings/play/play.go
@@ -30,7 +30,10 @@ func Kube(ctx context.Context, path string, options entities.PlayKubeOptions) (*
params.Set("network", options.Network)
params.Set("logDriver", options.LogDriver)
if options.SkipTLSVerify != types.OptionalBoolUndefined {
- params.Set("tlsVerify", strconv.FormatBool(options.SkipTLSVerify == types.OptionalBoolTrue))
+ params.Set("tlsVerify", strconv.FormatBool(options.SkipTLSVerify != types.OptionalBoolTrue))
+ }
+ if options.Start != types.OptionalBoolUndefined {
+ params.Set("start", strconv.FormatBool(options.Start == types.OptionalBoolTrue))
}
// TODO: have a global system context we can pass around (1st argument)
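For completeness, a hedged usage sketch of the new Start option through the Go bindings; bindings.NewConnection and the socket path are assumptions for illustration, only play.Kube and PlayKubeOptions.Start come from this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/play"
	"github.com/containers/podman/v2/pkg/domain/entities"
)

func main() {
	// Socket URI is an assumption for illustration (rootless default location).
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/user/1000/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	opts := entities.PlayKubeOptions{
		// Create the pod and containers but do not start them.
		Start: types.OptionalBoolFalse,
	}
	report, err := play.Kube(ctx, "./pod.yaml", opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", report)
}
```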
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 7e4afcc28..0b42e1a3f 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -28,6 +28,8 @@ type PlayKubeOptions struct {
ConfigMaps []string
// LogDriver for the container. For example: journald
LogDriver string
+ // Start - don't start the pod if false
+ Start types.OptionalBool
}
// PlayKubePod represents a single pod and associated containers created by play kube
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 855f9ece8..4b69ac74e 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -205,15 +205,13 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities.ContainerPruneOptions) (*entities.ContainerPruneReport, error) {
- var filterFuncs []libpod.ContainerFilter
+ filterFuncs := make([]libpod.ContainerFilter, 0, len(options.Filters))
for k, v := range options.Filters {
- for _, val := range v {
- generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, val, ic.Libpod)
- if err != nil {
- return nil, err
- }
- filterFuncs = append(filterFuncs, generatedFunc)
+ generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, v, ic.Libpod)
+ if err != nil {
+ return nil, err
}
+ filterFuncs = append(filterFuncs, generatedFunc)
}
return ic.pruneContainersHelper(filterFuncs)
}
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 4f572fb88..06941f8d0 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -110,7 +110,11 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
}
func (ic *ContainerEngine) NetworkCreate(ctx context.Context, name string, options entities.NetworkCreateOptions) (*entities.NetworkCreateReport, error) {
- return network.Create(name, options, ic.Libpod)
+ runtimeConfig, err := ic.Libpod.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ return network.Create(name, options, runtimeConfig)
}
func ifPassesFilterTest(netconf *libcni.NetworkConfigList, filter []string) bool {
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index c0948e099..4bcc6469c 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -297,20 +297,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
containers = append(containers, ctr)
}
- //start the containers
- podStartErrors, err := pod.Start(ctx)
- if err != nil {
- return nil, err
- }
+ if options.Start != types.OptionalBoolFalse {
+ //start the containers
+ podStartErrors, err := pod.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
- // Previous versions of playkube started containers individually and then
- // looked for errors. Because we now use the uber-Pod start call, we should
- // iterate the map of possible errors and return one if there is a problem. This
- // keeps the behavior the same
+ // Previous versions of playkube started containers individually and then
+ // looked for errors. Because we now use the uber-Pod start call, we should
+ // iterate the map of possible errors and return one if there is a problem. This
+ // keeps the behavior the same
- for _, e := range podStartErrors {
- if e != nil {
- return nil, e
+ for _, e := range podStartErrors {
+ if e != nil {
+ return nil, e
+ }
}
}
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
index 26c9c7e2e..b786a5fbf 100644
--- a/pkg/domain/infra/runtime_libpod.go
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -6,8 +6,10 @@ import (
"context"
"fmt"
"os"
+ "os/signal"
"sync"
+ "github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/domain/entities"
@@ -16,6 +18,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
flag "github.com/spf13/pflag"
)
@@ -348,3 +351,24 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
}
return &options, nil
}
+
+// StartWatcher starts a goroutine that reloads the runtime configuration on SIGHUP.
+func StartWatcher(rt *libpod.Runtime) {
+ // Setup the signal notifier
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, utils.SIGHUP)
+
+ go func() {
+ for {
+ // Block until the signal is received
+ logrus.Debugf("waiting for SIGHUP to reload configuration")
+ <-ch
+ if err := rt.Reload(); err != nil {
+ logrus.Errorf("unable to reload configuration: %v", err)
+ continue
+ }
+ }
+ }()
+
+ logrus.Debugf("registered SIGHUP watcher for config")
+}
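A hypothetical sketch (not part of this change) of where StartWatcher would be wired in once a runtime exists:

    package service

    import (
        "github.com/containers/podman/v2/libpod"
        "github.com/containers/podman/v2/pkg/domain/infra"
    )

    // serveWithReload is a hypothetical helper: once the libpod runtime exists,
    // register the watcher, then keep serving. A later `kill -HUP <podman pid>`
    // makes the runtime call Reload() and pick up configuration changes without
    // a restart.
    func serveWithReload(rt *libpod.Runtime) {
        infra.StartWatcher(rt)

        // ... start the REST service or command loop here ...
    }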
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index 96b2d754f..3dd7eb0c6 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -21,19 +21,17 @@ import (
func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOptions) ([]entities.ListContainer, error) {
var (
- filterFuncs []libpod.ContainerFilter
- pss = []entities.ListContainer{}
+ pss = []entities.ListContainer{}
)
+ filterFuncs := make([]libpod.ContainerFilter, 0, len(options.Filters))
all := options.All || options.Last > 0
if len(options.Filters) > 0 {
for k, v := range options.Filters {
- for _, val := range v {
- generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, val, runtime)
- if err != nil {
- return nil, err
- }
- filterFuncs = append(filterFuncs, generatedFunc)
+ generatedFunc, err := lpfilters.GenerateContainerFilterFuncs(k, v, runtime)
+ if err != nil {
+ return nil, err
}
+ filterFuncs = append(filterFuncs, generatedFunc)
}
}
@@ -43,7 +41,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
all = true
}
if !all {
- runningOnly, err := lpfilters.GenerateContainerFilterFuncs("status", define.ContainerStateRunning.String(), runtime)
+ runningOnly, err := lpfilters.GenerateContainerFilterFuncs("status", []string{define.ContainerStateRunning.String()}, runtime)
if err != nil {
return nil, err
}
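Both this hunk and the ContainerPrune change above switch to the new GenerateContainerFilterFuncs signature, which takes every value for a filter key as one slice. A hypothetical sketch of the calling convention:

    package example

    import (
        "github.com/containers/podman/v2/libpod"
        lpfilters "github.com/containers/podman/v2/libpod/filters"
    )

    // buildFilters illustrates the new convention: all values supplied for a
    // filter key are handed to GenerateContainerFilterFuncs in one slice,
    // leaving the filter implementation to decide how they combine (in the e2e
    // tests below, multiple status values are OR'ed while multiple label
    // values must all match).
    func buildFilters(runtime *libpod.Runtime, filters map[string][]string) ([]libpod.ContainerFilter, error) {
        funcs := make([]libpod.ContainerFilter, 0, len(filters))
        for key, values := range filters {
            f, err := lpfilters.GenerateContainerFilterFuncs(key, values, runtime)
            if err != nil {
                return nil, err
            }
            funcs = append(funcs, f)
        }
        return funcs, nil
    }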
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 415fd169b..f6a084c00 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -6,6 +6,7 @@ import (
"os"
"os/user"
"path/filepath"
+ "regexp"
"strconv"
"strings"
"sync"
@@ -84,6 +85,17 @@ func StringInSlice(s string, sl []string) bool {
return false
}
+// StringMatchRegexSlice determines whether a given string matches any of the given regular expressions.
+func StringMatchRegexSlice(s string, re []string) bool {
+ for _, r := range re {
+ m, err := regexp.MatchString(r, s)
+ if err == nil && m {
+ return true
+ }
+ }
+ return false
+}
+
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
// by containers/image, but containing additional fields that are not supported
// by OCIv1 (but are by Docker v2) - notably OnBuild.
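A short usage sketch of the new helper, with illustrative values:

    package main

    import (
        "fmt"

        "github.com/containers/podman/v2/pkg/util"
    )

    func main() {
        id := "f78e4c4e6a4b1d2c"

        // Each entry is treated as a regular expression; one match is enough.
        // Patterns that fail to compile are skipped rather than surfaced as errors.
        fmt.Println(util.StringMatchRegexSlice(id, []string{"^f78e", "deadbeef"})) // true
        fmt.Println(util.StringMatchRegexSlice(id, []string{"deadbeef"}))          // false
    }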
diff --git a/test/apiv2/rest_api/test_rest_v2_0_0.py b/test/apiv2/rest_api/test_rest_v2_0_0.py
index 7192347c7..49e18f063 100644
--- a/test/apiv2/rest_api/test_rest_v2_0_0.py
+++ b/test/apiv2/rest_api/test_rest_v2_0_0.py
@@ -187,12 +187,14 @@ class TestApi(unittest.TestCase):
payload = json.loads(create.text)
self.assertIsNotNone(payload["Id"])
- connect = requests.post(
- PODMAN_URL + "/v1.40/networks/TestNetwork/connect",
- json={"Container": payload["Id"]},
- )
- self.assertEqual(connect.status_code, 200, create.text)
- self.assertEqual(connect.text, "OK\n")
+        # This cannot be done until the network connect and
+        # network disconnect stacks are fully implemented.
+ # connect = requests.post(
+ # PODMAN_URL + "/v1.40/networks/TestNetwork/connect",
+ # json={"Container": payload["Id"]},
+ # )
+ # self.assertEqual(connect.status_code, 200, connect.text)
+ # self.assertEqual(connect.text, "OK\n")
def test_commit(self):
r = requests.post(_url(ctnr("/commit?container={}")))
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 7ae474c76..92e4544f9 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -1482,4 +1482,19 @@ MemoryReservation: {{ .HostConfig.MemoryReservation }}`})
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).To(ContainSubstring("journald"))
})
+
+ It("podman play kube test only creating the containers", func() {
+ pod := getPod()
+ err := generateKubeYaml("pod", pod, kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", "--start=false", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .State.Running }}"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(Equal("false"))
+ })
})
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index ccdf0a423..25212991d 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -99,4 +99,23 @@ var _ = Describe("Podman pod inspect", func() {
Expect(len(inspectJSON.InfraConfig.PortBindings["80/tcp"])).To(Equal(1))
Expect(inspectJSON.InfraConfig.PortBindings["80/tcp"][0].HostPort).To(Equal("8080"))
})
+
+ It("podman pod inspect outputs show correct MAC", func() {
+ SkipIfRootless("--mac-address is not supported in rootless mode")
+ podName := "testPod"
+ macAddr := "42:43:44:00:00:01"
+ create := podmanTest.Podman([]string{"pod", "create", "--name", podName, "--mac-address", macAddr})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ create = podmanTest.Podman([]string{"run", "-d", "--pod", podName, ALPINE, "top"})
+ create.WaitWithDefaultTimeout()
+ Expect(create.ExitCode()).To(Equal(0))
+
+ inspectOut := podmanTest.Podman([]string{"pod", "inspect", podName})
+ inspectOut.WaitWithDefaultTimeout()
+ Expect(inspectOut.ExitCode()).To(Equal(0))
+
+ Expect(inspectOut.OutputToString()).To(ContainSubstring(macAddr))
+ })
})
diff --git a/test/e2e/pod_pod_namespaces.go b/test/e2e/pod_pod_namespaces_test.go
index 20b8bdb39..20b8bdb39 100644
--- a/test/e2e/pod_pod_namespaces.go
+++ b/test/e2e/pod_pod_namespaces_test.go
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index f3a66e58a..fd08d4308 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -545,4 +545,126 @@ var _ = Describe("Podman ps", func() {
Expect(result.ExitCode()).To(Equal(0))
Expect(result.OutputToString()).To(ContainSubstring("ago"))
})
+
+ It("podman ps filter test", func() {
+ session := podmanTest.Podman([]string{"run", "-d", "--name", "test1", "--label", "foo=1",
+ "--label", "bar=2", "--volume", "volume1:/test", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid1 := session.OutputToString()
+
+ session = podmanTest.Podman([]string{"run", "--name", "test2", "--label", "foo=1",
+ ALPINE, "ls", "/fail"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(1))
+
+ session = podmanTest.Podman([]string{"create", "--name", "test3", ALPINE, cid1})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"run", "--name", "test4", "--volume", "volume1:/test1",
+ "--volume", "/:/test2", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "name=test"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(5))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+ Expect(session.LineInOutputContains("test3")).To(BeTrue())
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "name=test1", "--filter", "name=test2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+
+ // check that container id filters are matched as regular expressions
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "id=" + cid1[:40], "--filter", "id=" + cid1 + "$"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--filter", "status=created"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test3")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--filter", "status=created", "--filter", "status=exited"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(4))
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+ Expect(session.LineInOutputContains("test3")).To(BeTrue())
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "label=foo=1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--filter", "label=foo=1", "--filter", "status=exited"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "label=foo=1", "--filter", "label=non=1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(1))
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "label=foo=1", "--filter", "label=bar=2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "exited=1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "exited=1", "--filter", "exited=0"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(session.LineInOutputContains("test2")).To(BeTrue())
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "volume=volume1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "volume=/:/test2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "before=test2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
+ Expect(session.LineInOutputContains("test1")).To(BeTrue())
+
+ session = podmanTest.Podman([]string{"ps", "--all", "--filter", "since=test2"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(session.LineInOutputContains("test3")).To(BeTrue())
+ Expect(session.LineInOutputContains("test4")).To(BeTrue())
+ })
})
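The id filters in the test above are matched as regular expressions (note the trailing "$" anchor). A stdlib-only sketch of why both filter values select the same container:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical full 64-character container ID.
        cid := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

        // The test filters once on a 40-character prefix and once on the full
        // ID anchored with "$"; both patterns match the same container, so ps
        // prints a single row plus the header.
        prefixMatch, _ := regexp.MatchString(cid[:40], cid)
        anchoredMatch, _ := regexp.MatchString(cid+"$", cid)
        fmt.Println(prefixMatch, anchoredMatch) // true true
    }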
diff --git a/test/e2e/run_seccomp.go b/test/e2e/run_seccomp_test.go
index 7d04cc60a..7d04cc60a 100644
--- a/test/e2e/run_seccomp.go
+++ b/test/e2e/run_seccomp_test.go
diff --git a/test/e2e/run_security_labels.go b/test/e2e/run_security_labels_test.go
index 0c5621e3f..0c5621e3f 100644
--- a/test/e2e/run_security_labels.go
+++ b/test/e2e/run_security_labels_test.go
diff --git a/test/e2e/run_working_dir.go b/test/e2e/run_working_dir_test.go
index 7d8db361c..7d8db361c 100644
--- a/test/e2e/run_working_dir.go
+++ b/test/e2e/run_working_dir_test.go
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 7747cdd0e..5c3c69fd4 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -93,10 +93,10 @@ registries = ['{{.Host}}:{{.Port}}']`
})
It("podman search single registry flag", func() {
- search := podmanTest.Podman([]string{"search", "quay.io/libpod/gate:latest"})
+ search := podmanTest.Podman([]string{"search", "quay.io/skopeo/stable:latest"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
- Expect(search.LineInOutputContains("quay.io/libpod/gate")).To(BeTrue())
+ Expect(search.LineInOutputContains("quay.io/skopeo/stable")).To(BeTrue())
})
It("podman search image with description", func() {
diff --git a/test/python/docker/test_containers.py b/test/python/docker/test_containers.py
index 5fb340fd4..0fd419d9d 100644
--- a/test/python/docker/test_containers.py
+++ b/test/python/docker/test_containers.py
@@ -60,10 +60,14 @@ class TestContainers(unittest.TestCase):
def test_create_network(self):
net = self.client.networks.create("testNetwork", driver="bridge")
ctnr = self.client.containers.create(image="alpine", detach=True)
- net.connect(ctnr)
- nets = self.client.networks.list(greedy=True)
- self.assertGreaterEqual(len(nets), 1)
+        # TODO fix when ready
+        # This test will not work until the network connect and
+        # disconnect code is fixed.
+ # net.connect(ctnr)
+
+ # nets = self.client.networks.list(greedy=True)
+ # self.assertGreaterEqual(len(nets), 1)
# TODO fix endpoint to include containers
# for n in nets:
diff --git a/troubleshooting.md b/troubleshooting.md
index 604ca9b1d..3ff578142 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -457,7 +457,7 @@ Attempts to run podman result in
One workaround is to disable Secure Boot in your BIOS.
-### 20) error creating libpod runtime: there might not be enough IDs available in the namespace
+### 19) error creating libpod runtime: there might not be enough IDs available in the namespace
Unable to pull images