-rw-r--r--  libpod/container_internal.go                        9
-rw-r--r--  libpod/container_internal_linux.go                 10
-rw-r--r--  libpod/networking_linux.go                          1
-rw-r--r--  libpod/oci_conmon_linux.go                          1
-rw-r--r--  libpod/oci_util.go                                 13
-rw-r--r--  pkg/domain/entities/events.go                       4
-rw-r--r--  pkg/machine/qemu/machine.go                        10
-rw-r--r--  pkg/rootlessport/rootlessport_linux.go              5
-rw-r--r--  test/apiv2/python/rest_api/test_v2_0_0_system.py    8
-rw-r--r--  test/e2e/run_cgroup_parent_test.go                 32
-rw-r--r--  test/system/500-networking.bats                   119
11 files changed, 145 insertions, 67 deletions
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 4d1a25541..18b80475b 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -293,6 +293,15 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
}
}
+ // set up the rootlesskit port forwarder again since it dies when conmon exits
+ // the rootlesskit port forwarder is used only when rootless and the bridge network is in use
+ if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 {
+ err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS.Path())
+ if err != nil {
+ return false, err
+ }
+ }
+
if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index eabe8efd2..ae029dc62 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -2489,11 +2489,6 @@ func (c *Container) getOCICgroupPath() (string, error) {
switch {
case c.config.NoCgroups:
return "", nil
- case (rootless.IsRootless() && (cgroupManager == config.CgroupfsCgroupsManager || !unified)):
- if !isRootlessCgroupSet(c.config.CgroupParent) {
- return "", nil
- }
- return c.config.CgroupParent, nil
case c.config.CgroupsMode == cgroupSplit:
if c.config.CgroupParent != "" {
return c.config.CgroupParent, nil
@@ -2510,6 +2505,11 @@ func (c *Container) getOCICgroupPath() (string, error) {
systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID())
logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups)
return systemdCgroups, nil
+ case (rootless.IsRootless() && (cgroupManager == config.CgroupfsCgroupsManager || !unified)):
+ if c.config.CgroupParent == "" || !isRootlessCgroupSet(c.config.CgroupParent) {
+ return "", nil
+ }
+ fallthrough
case cgroupManager == config.CgroupfsCgroupsManager:
cgroupPath := filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID()))
logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath)
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 9aa6cab15..b0d4e0b2d 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -718,6 +718,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
// set up port forwarder for CNI-in-slirp4netns
netnsPath := ctr.state.NetNS.Path()
// TODO: support slirp4netns port forwarder as well
+ // make sure to fix this in container.handleRestartPolicy() as well
return r.setupRootlessPortMappingViaRLK(ctr, netnsPath)
}
return nil
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index c00d83f95..831e89223 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1140,6 +1140,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if err != nil {
return err
}
+ filesToClose = append(filesToClose, ports...)
// Leak the port we bound in the conmon process. These fd's won't be used
// by the container and conmon will keep the ports busy so that another
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 1cafd5863..f2843b09b 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -68,6 +68,12 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return nil, errors.Wrapf(err, "cannot get file for UDP socket")
}
files = append(files, f)
+ // close the listener
+ // note that this does not affect the fd, see the godoc for server.File()
+ err = server.Close()
+ if err != nil {
+ logrus.Warnf("failed to close connection: %v", err)
+ }
case "tcp":
var (
@@ -96,6 +102,13 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
return nil, errors.Wrapf(err, "cannot get file for TCP socket")
}
files = append(files, f)
+ // close the listener
+ // note that this does not affect the fd, see the godoc for server.File()
+ err = server.Close()
+ if err != nil {
+ logrus.Warnf("failed to close connection: %v", err)
+ }
+
case "sctp":
if !notifySCTP {
notifySCTP = true
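For context on the comments above: Listener.File() returns a duplicate of the underlying descriptor, so closing the listener afterwards leaves the port held through the *os.File that is later leaked into conmon. A minimal sketch of that pattern (address and variable names are illustrative):

package main

import (
	"fmt"
	"net"
)

// File() returns a duplicate of the listener's fd, so the listener itself
// can be closed while the *os.File keeps the port reserved.
func main() {
	ln, err := net.Listen("tcp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	f, err := ln.(*net.TCPListener).File() // dup of the underlying fd
	if err != nil {
		panic(err)
	}
	// Closing the listener does not close the duplicated fd held by f.
	if err := ln.Close(); err != nil {
		fmt.Println("close listener:", err)
	}
	fmt.Println("port still held via fd", f.Fd())
}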
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index 5e7cc9ad1..73a375b94 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -60,6 +60,10 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
return &Event{dockerEvents.Message{
+ // Compatibility with clients that still look for deprecated API elements
+ Status: e.Status.String(),
+ ID: e.ID,
+ From: e.Image,
Type: e.Type.String(),
Action: e.Status.String(),
Actor: dockerEvents.Actor{
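The three added fields mirror data already carried in Action and Actor, restoring the pre-1.22 Docker events wire format. A minimal sketch of the JSON shape old clients expect (the struct and tags are assumptions modeled on the Docker events API, not the exact Podman types):

package main

import (
	"encoding/json"
	"fmt"
)

// legacyEvent shows how the deprecated top-level status/id/from keys
// duplicate the newer Action/Actor data so old clients keep working.
type legacyEvent struct {
	Status string `json:"status,omitempty"` // deprecated mirror of Action
	ID     string `json:"id,omitempty"`     // deprecated mirror of Actor.ID
	From   string `json:"from,omitempty"`   // deprecated mirror of Actor.Attributes["image"]
	Type   string `json:"Type"`
	Action string `json:"Action"`
}

func main() {
	b, _ := json.Marshal(legacyEvent{
		Status: "start", ID: "abc123", From: "quay.io/libpod/alpine",
		Type: "container", Action: "start",
	})
	fmt.Println(string(b))
}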
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 5d8c6e6ce..d5f538594 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -15,6 +15,7 @@ import (
"strings"
"time"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/pkg/machine"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/utils"
@@ -627,9 +628,12 @@ func CheckActiveVM() (bool, string, error) {
// startHostNetworking runs a binary on the host system that allows users
// to setup port forwarding to the podman virtual machine
func (v *MachineVM) startHostNetworking() error {
- // TODO we may wish to configure the directory in containers common
- binary := filepath.Join("/usr/libexec/podman/", machine.ForwarderBinaryName)
- if _, err := os.Stat(binary); err != nil {
+ cfg, err := config.Default()
+ if err != nil {
+ return err
+ }
+ binary, err := cfg.FindHelperBinary(machine.ForwarderBinaryName, false)
+ if err != nil {
return err
}
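FindHelperBinary searches the helper_binaries_dir paths configured in containers.conf instead of the hard-coded /usr/libexec/podman location. A minimal usage sketch of that lookup (the binary name "gvproxy" is only an example):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

// Look up a helper binary through containers-common rather than a fixed path.
func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// second argument: whether to also search $PATH
	path, err := cfg.FindHelperBinary("gvproxy", false)
	if err != nil {
		fmt.Println("helper not found:", err)
		return
	}
	fmt.Println("using helper at", path)
}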
diff --git a/pkg/rootlessport/rootlessport_linux.go b/pkg/rootlessport/rootlessport_linux.go
index 730d91aa2..10d135e0b 100644
--- a/pkg/rootlessport/rootlessport_linux.go
+++ b/pkg/rootlessport/rootlessport_linux.go
@@ -218,6 +218,9 @@ outer:
// we only need to have a socket to reload ports when we run under rootless cni
if cfg.RootlessCNI {
+ socketfile := filepath.Join(socketDir, cfg.ContainerID)
+ // make sure to remove the file if it exists to prevent EADDRINUSE
+ _ = os.Remove(socketfile)
// workaround to bypass the 108 char socket path limit
// open the fd and use the path to the fd as bind argument
fd, err := unix.Open(socketDir, unix.O_PATH, 0)
@@ -229,6 +232,8 @@ outer:
return err
}
err = unix.Close(fd)
+ // remove the socket file on exit
+ defer os.Remove(socketfile)
if err != nil {
logrus.Warnf("failed to close the socketDir fd: %v", err)
}
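The stale-socket handling above matters because binding a unix socket fails with EADDRINUSE when the path already exists. A minimal sketch of the remove-before-bind and remove-on-exit pattern (the path is illustrative; the real code binds via an O_PATH fd to dodge the 108-character socket path limit):

package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	socketfile := "/tmp/rootlessport-example.sock"

	// a leftover socket from a previous run would make Listen fail with EADDRINUSE
	_ = os.Remove(socketfile)

	ln, err := net.Listen("unix", socketfile)
	if err != nil {
		panic(err)
	}
	defer os.Remove(socketfile) // remove the socket file on exit
	defer ln.Close()

	fmt.Println("listening on", socketfile)
}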
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_system.py b/test/apiv2/python/rest_api/test_v2_0_0_system.py
index 2d3935c9c..8171abb84 100644
--- a/test/apiv2/python/rest_api/test_v2_0_0_system.py
+++ b/test/apiv2/python/rest_api/test_v2_0_0_system.py
@@ -29,6 +29,14 @@ class SystemTestCase(APITestCase):
obj = json.loads(line)
# Actor.ID is uppercase for compatibility
self.assertIn("ID", obj["Actor"])
+ # Verify the variants deprecated since API 1.22 are still present whenever their current counterparts are
+ if (obj["Actor"]["ID"]):
+ self.assertEqual(obj["Actor"]["ID"], obj["id"])
+ if (obj["Action"]):
+ self.assertEqual(obj["Action"], obj["status"])
+ if (obj["Actor"].get("Attributes") and obj["Actor"]["Attributes"].get("image")):
+ self.assertEqual(obj["Actor"]["Attributes"]["image"], obj["from"])
+
def test_ping(self):
required_headers = (
diff --git a/test/e2e/run_cgroup_parent_test.go b/test/e2e/run_cgroup_parent_test.go
index 82b6c3057..e0e1d4b1d 100644
--- a/test/e2e/run_cgroup_parent_test.go
+++ b/test/e2e/run_cgroup_parent_test.go
@@ -13,6 +13,8 @@ import (
. "github.com/onsi/gomega/gexec"
)
+const cgroupRoot = "/sys/fs/cgroup"
+
var _ = Describe("Podman run with --cgroup-parent", func() {
var (
tempdir string
@@ -64,7 +66,6 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
})
Specify("always honor --cgroup-parent", func() {
- Skip("https://github.com/containers/podman/issues/11165")
SkipIfCgroupV1("test not supported in cgroups v1")
if Containerized() || podmanTest.CgroupManager == "cgroupfs" {
Skip("Requires Systemd cgroup manager support")
@@ -78,36 +79,31 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
Expect(run).Should(Exit(0))
cid := run.OutputToString()
- exec := podmanTest.Podman([]string{"exec", cid, "cat", "/proc/self/cgroup"})
+ exec := podmanTest.Podman([]string{"exec", cid, "cat", "/proc/1/cgroup"})
exec.WaitWithDefaultTimeout()
Expect(exec).Should(Exit(0))
containerCgroup := strings.TrimRight(strings.Replace(exec.OutputToString(), "0::", "", -1), "\n")
- content, err := ioutil.ReadFile(filepath.Join("/sys/fs/cgroup", containerCgroup, "cgroup.procs"))
- Expect(err).To(BeNil())
-
// Move the container process to a sub cgroup
- subCgroupPath := filepath.Join(filepath.Join("/sys/fs/cgroup", containerCgroup, "old-container"))
-
- err = os.MkdirAll(subCgroupPath, 0755)
+ content, err := ioutil.ReadFile(filepath.Join(cgroupRoot, containerCgroup, "cgroup.procs"))
Expect(err).To(BeNil())
-
- err = ioutil.WriteFile(filepath.Join(subCgroupPath, "cgroup.procs"), content, 0644)
+ oldSubCgroupPath := filepath.Join(filepath.Join(cgroupRoot, containerCgroup, "old-container"))
+ err = os.MkdirAll(oldSubCgroupPath, 0755)
+ Expect(err).To(BeNil())
+ err = ioutil.WriteFile(filepath.Join(oldSubCgroupPath, "cgroup.procs"), content, 0644)
Expect(err).To(BeNil())
- cgroup := filepath.Dir(containerCgroup)
+ newCgroup := fmt.Sprintf("%s/new-container", containerCgroup)
+ err = os.MkdirAll(filepath.Join(cgroupRoot, newCgroup), 0755)
+ Expect(err).To(BeNil())
- run = podmanTest.Podman([]string{"--cgroup-manager=cgroupfs", "run", "-d", fmt.Sprintf("--cgroup-parent=%s", cgroup), fedoraMinimal, "sleep", "100"})
+ run = podmanTest.Podman([]string{"--cgroup-manager=cgroupfs", "run", "--rm", "--cgroupns=host", fmt.Sprintf("--cgroup-parent=%s", newCgroup), fedoraMinimal, "cat", "/proc/self/cgroup"})
run.WaitWithDefaultTimeout()
Expect(run).Should(Exit(0))
+ cgroupEffective := strings.TrimRight(strings.Replace(run.OutputToString(), "0::", "", -1), "\n")
- exec = podmanTest.Podman([]string{"exec", cid, "cat", "/proc/self/cgroup"})
- exec.WaitWithDefaultTimeout()
- Expect(exec).Should(Exit(0))
- cgroupEffective := filepath.Dir(strings.TrimRight(strings.Replace(exec.OutputToString(), "0::", "", -1), "\n"))
-
- Expect(cgroupEffective).To(Equal(cgroup))
+ Expect(newCgroup).To(Equal(filepath.Dir(cgroupEffective)))
})
Specify("valid --cgroup-parent using slice", func() {
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index ad5891dd9..bdedfae19 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -32,7 +32,6 @@ load helpers
# Bind-mount this file with a different name to a container running httpd
run_podman run -d --name myweb -p "$HOST_PORT:80" \
- --restart always \
-v $INDEX1:/var/www/index.txt:Z \
-w /var/www \
$IMAGE /bin/busybox-extras httpd -f -p 80
@@ -67,46 +66,6 @@ load helpers
run_podman 125 port myweb 99/tcp
is "$output" 'Error: failed to find published port "99/tcp"'
- # Tests #10310: podman will restart slirp4netns on container restart
- run_podman container inspect --format "{{.State.Pid}}" $cid
- pid=$output
-
- # Kill the process; podman restart policy will bring up a new container.
- # -9 is crucial: busybox httpd ignores all other signals.
- kill -9 $pid
- # Wait for process to exit
- retries=30
- while kill -0 $pid; do
- sleep 0.5
- retries=$((retries - 1))
- if [[ $retries -eq 0 ]]; then
- die "Process $pid (container $cid) refused to die"
- fi
- done
-
- # Wait for container to restart
- retries=20
- while :;do
- run_podman container inspect --format "{{.State.Pid}}" myweb
- # pid is 0 as long as the container is not running
- if [[ $output -ne 0 ]]; then
- if [[ $output == $pid ]]; then
- die "This should never happen! Restarted container has same PID ($output) as killed one!"
- fi
- break
- fi
- sleep 0.5
- retries=$((retries - 1))
- if [[ $retries -eq 0 ]]; then
- die "Timed out waiting for container to restart"
- fi
- done
-
- # Verify http contents again: curl from localhost
- # Use retry since it can take a moment until the new container is ready
- run curl --retry 2 -s $SERVER/index.txt
- is "$output" "$random_1" "curl 127.0.0.1:/index.txt after restart"
-
# Clean up
run_podman stop -t 1 myweb
run_podman rm myweb
@@ -476,4 +435,82 @@ load helpers
run_podman network rm -f $netname $netname2
}
+@test "podman network after restart" {
+ random_1=$(random_string 30)
+
+ HOST_PORT=$(random_free_port)
+ SERVER=http://127.0.0.1:$HOST_PORT
+
+ # Create a test file with random content
+ INDEX1=$PODMAN_TMPDIR/hello.txt
+ echo $random_1 > $INDEX1
+
+ local netname=testnet-$(random_string 10)
+ run_podman network create $netname
+ is "$output" ".*/cni/net.d/$netname.conflist" "output of 'network create'"
+
+ for network in "slirp4netns" "$netname"; do
+ # Start container with the restart always policy
+ run_podman run -d --name myweb -p "$HOST_PORT:80" \
+ --restart always \
+ --network $network \
+ -v $INDEX1:/var/www/index.txt:Z \
+ -w /var/www \
+ $IMAGE /bin/busybox-extras httpd -f -p 80
+ cid=$output
+
+ # Tests #10310: podman will restart slirp4netns on container restart
+ run_podman container inspect --format "{{.State.Pid}}" $cid
+ pid=$output
+
+ # Kill the process; podman restart policy will bring up a new container.
+ # -9 is crucial: busybox httpd ignores all other signals.
+ kill -9 $pid
+ # Wait for process to exit
+ retries=30
+ while kill -0 $pid; do
+ sleep 0.5
+ retries=$((retries - 1))
+ if [[ $retries -eq 0 ]]; then
+ die "Process $pid (container $cid) refused to die"
+ fi
+ done
+
+ # Wait for container to restart
+ retries=20
+ while :;do
+ run_podman container inspect --format "{{.State.Pid}}" $cid
+ # pid is 0 as long as the container is not running
+ if [[ $output -ne 0 ]]; then
+ if [[ $output == $pid ]]; then
+ die "This should never happen! Restarted container has same PID ($output) as killed one!"
+ fi
+ break
+ fi
+ sleep 0.5
+ retries=$((retries - 1))
+ if [[ $retries -eq 0 ]]; then
+ die "Timed out waiting for container to restart"
+ fi
+ done
+
+ # Verify http contents again: curl from localhost
+ # Use retry since it can take a moment until the new container is ready
+ run curl --retry 2 -s $SERVER/index.txt
+ is "$output" "$random_1" "curl 127.0.0.1:/index.txt after auto restart"
+
+ run_podman restart $cid
+ # Verify http contents again: curl from localhost
+ # Use retry since it can take a moment until the new container is ready
+ run curl --retry 2 -s $SERVER/index.txt
+ is "$output" "$random_1" "curl 127.0.0.1:/index.txt after podman restart"
+
+ run_podman stop -t 0 $cid
+ run_podman rm -f $cid
+ done
+
+ # Cleanup network
+ run_podman network rm $netname
+}
+
# vim: filetype=sh