-rw-r--r-- | .cirrus.yml | 28
-rw-r--r-- | cmd/podman/play/kube.go | 3
-rwxr-xr-x | contrib/cirrus/setup_environment.sh | 16
-rw-r--r-- | docs/source/markdown/podman-play-kube.1.md | 34
-rw-r--r-- | go.mod | 2
-rw-r--r-- | go.sum | 4
-rw-r--r-- | libpod/runtime.go | 10
-rw-r--r-- | libpod/runtime_volume_linux.go | 15
-rw-r--r-- | pkg/bindings/connection.go | 2
-rw-r--r-- | pkg/bindings/images/build.go | 4
-rw-r--r-- | pkg/domain/entities/play.go | 2
-rw-r--r-- | pkg/domain/infra/abi/play.go | 131
-rw-r--r-- | pkg/rootlessport/rootlessport_linux.go | 35
-rw-r--r-- | pkg/systemd/dbus.go | 97
-rwxr-xr-x | test/buildah-bud/apply-podman-deltas | 8
-rw-r--r-- | test/compose/mount_and_label/docker-compose.yml | 2
-rw-r--r-- | test/e2e/common_test.go | 21
-rw-r--r-- | test/e2e/login_logout_test.go | 11
-rw-r--r-- | test/e2e/play_build_test.go | 243
-rw-r--r-- | test/e2e/play_kube_test.go | 15
-rw-r--r-- | test/e2e/run_test.go | 2
-rw-r--r-- | test/e2e/stats_test.go | 3
-rw-r--r-- | test/e2e/systemd_test.go | 11
-rw-r--r-- | troubleshooting.md | 135
-rw-r--r-- | vendor/modules.txt | 2
25 files changed, 713 insertions, 123 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index ef6ac5e8b..3fcf335ed 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -30,20 +30,17 @@ env:
     PRIOR_UBUNTU_NAME: "ubuntu-2010"

     # Google-cloud VM Images
-    # TODO: At the time of this comment, an selinux-policy regression is blocking use of updated
-    # Fedora VM images: https://bugzilla.redhat.com/show_bug.cgi?id=1965743
-    IMAGE_SUFFIX_UBUNTU: "c5521575421149184"
-    IMAGE_SUFFIX: "c5348179051806720"
+    IMAGE_SUFFIX: "c6737534580424704"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
-    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX_UBUNTU}"
-    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX_UBUNTU}"
+    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
+    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"

     # Container FQIN's
     FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
-    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX_UBUNTU}"
-    PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX_UBUNTU}"
+    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
+    PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX}"

     ####
     #### Control variables that determine what to run and how to run it.
@@ -632,17 +629,11 @@ rootless_system_test_task:
     main_script: *main
     always: *logs_artifacts

-# FIXME: we may want to consider running this from nightly cron instead of CI.
-# The tests are actually pretty quick (less than a minute) but they do rely
-# on pulling images from quay.io, which means we're subject to network flakes.
-#
-# FIXME: how does this env matrix work, anyway? Does it spin up multiple VMs?
-# We might just want to encode the version matrix in runner.sh instead
 upgrade_test_task:
     name: "Upgrade test: from $PODMAN_UPGRADE_FROM"
     alias: upgrade_test
     skip: *tags
-    only_if: *not_docs
+    only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' || $CIRRUS_CRON != ''
     depends_on:
       - local_system_test
     matrix:
@@ -677,18 +668,11 @@ meta_task:
         image: quay.io/libpod/imgts:$IMAGE_SUFFIX
     env:
         # Space-separated list of images used by this repository state
-        # TODO: Protect commonly tagged ubuntu images from puning in case
-        # workaround for BZ1965743 remains in use beyond the 30-days.
-        # Ref sha 404d5edb155
         IMGNAMES: >-
             ${FEDORA_CACHE_IMAGE_NAME}
             ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
             ${UBUNTU_CACHE_IMAGE_NAME}
             ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
-            fedora-${IMAGE_SUFFIX_UBUNTU}
-            prior-fedora-${IMAGE_SUFFIX_UBUNTU}
-            ubuntu-${IMAGE_SUFFIX}
-            prior-ubuntu-${IMAGE_SUFFIX}
         BUILDID: "${CIRRUS_BUILD_ID}"
         REPOREF: "${CIRRUS_REPO_NAME}"
         GCPJSON: ENCRYPTED[3a198350077849c8df14b723c0f4c9fece9ebe6408d35982e7adf2105a33f8e0e166ed3ed614875a0887e1af2b8775f4]
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index ece7d1f98..2eebd9f86 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -100,6 +100,9 @@ func init() {
 		configmapFlagName := "configmap"
 		flags.StringSliceVar(&kubeOptions.ConfigMaps, configmapFlagName, []string{}, "`Pathname` of a YAML file containing a kubernetes configmap")
 		_ = kubeCmd.RegisterFlagCompletionFunc(configmapFlagName, completion.AutocompleteDefault)
+
+		buildFlagName := "build"
+		flags.BoolVar(&kubeOptions.Build, buildFlagName, false, "Build all images in a YAML (given Containerfiles exist)")
 	}
 	_ = flags.MarkHidden("signature-policy")
 }
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 3f2176fd6..d0c348d58 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -77,6 +77,13 @@ case "$CG_FS_TYPE" in
             else
                 echo "OCI_RUNTIME=runc" >> /etc/ci_environment
             fi
+
+            # As a general policy CGv1 + runc should coincide with the "older"
+            # VM Images in CI.  Verify this is the case.
+            if [[ -n "$VM_IMAGE_NAME" ]] && [[ ! "$VM_IMAGE_NAME" =~ prior ]]
+            then
+                die "Most recent distro version should never run with CGv1"
+            fi
         fi
         ;;
     cgroup2fs)
@@ -85,6 +92,13 @@ case "$CG_FS_TYPE" in
             # which uses runc as the default.
             warn "Forcing testing with crun instead of runc"
             echo "OCI_RUNTIME=crun" >> /etc/ci_environment
+
+            # As a general policy CGv2 + crun should coincide with the "newer"
+            # VM Images in CI.  Verify this is the case.
+            if [[ -n "$VM_IMAGE_NAME" ]] && [[ "$VM_IMAGE_NAME" =~ prior ]]
+            then
+                die "Least recent distro version should never run with CGv2"
+            fi
         fi
         ;;
     *) die_unknown CG_FS_TYPE
@@ -208,7 +222,7 @@ case "$TEST_FLAVOR" in
     unit) ;;
     apiv2) ;&  # use next item
     compose)
-        dnf install -y $PACKAGE_DOWNLOAD_DIR/podman-docker*
+        rpm -ivh $PACKAGE_DOWNLOAD_DIR/podman-docker*
         ;&  # continue with next item
     int) ;&
     sys) ;&
diff --git a/docs/source/markdown/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index ad5ae7e4c..268e4bbcb 100644
--- a/docs/source/markdown/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
@@ -35,6 +35,36 @@ A Kubernetes PersistentVolumeClaim represents a Podman named volume. Only the Pe
 - volume.podman.io/gid
 - volume.podman.io/mount-options

+Play kube is capable of building images on the fly given the correct directory layout and Containerfiles. This
+option is not available for remote clients yet. Consider the following excerpt from a YAML file:
+```
+apiVersion: v1
+kind: Pod
+metadata:
+...
+spec:
+  containers:
+  - command:
+    - top
+    - name: container
+      value: podman
+    image: foobar
+...
+```
+
+If there is a directory named `foobar` in the current working directory with a file named `Containerfile` or `Dockerfile`,
+Podman play kube will build that image and name it `foobar`. An example directory structure for this example would look
+like:
+```
+|- mykubefiles
+    |- myplayfile.yaml
+    |- foobar
+         |- Containerfile
+```
+
+The build will consider `foobar` to be the context directory for the build. If there is an image in local storage
+called `foobar`, the image will not be built unless the `--build` flag is used.
+
 ## OPTIONS

 #### **--authfile**=*path*
@@ -45,6 +75,10 @@ If the authorization state is not found there, $HOME/.docker/config.json is chec
 Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
 environment variable. `export REGISTRY_AUTH_FILE=path`

+#### **--build**
+
+Build images even if they are found in local storage.
+
 #### **--cert-dir**=*path*

 Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
diff --git a/go.mod b/go.mod
@@ -53,7 +53,7 @@ require (
 	github.com/opencontainers/selinux v1.8.4
 	github.com/pkg/errors v0.9.1
 	github.com/pmezard/go-difflib v1.0.0
-	github.com/rootless-containers/rootlesskit v0.14.4
+	github.com/rootless-containers/rootlesskit v0.14.5
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/pflag v1.0.5
diff --git a/go.sum b/go.sum
@@ -817,8 +817,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rootless-containers/rootlesskit v0.14.4 h1:pqx9a+OC/6jjV7sIUKy3D1p6NLEC6WIMiJWAGsGMCUM=
-github.com/rootless-containers/rootlesskit v0.14.4/go.mod h1:Ai3detLzryb/4EkzXmNfh8aByUcBXp/qqkQusJs1SO8=
+github.com/rootless-containers/rootlesskit v0.14.5 h1:X4eNt2e1h/uSjlssKqpeTY5fatrjDz9F9FX05RJB7Tw=
+github.com/rootless-containers/rootlesskit v0.14.5/go.mod h1:Ai3detLzryb/4EkzXmNfh8aByUcBXp/qqkQusJs1SO8=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 30659a3d4..1f403790f 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -30,6 +30,7 @@ import (
 	"github.com/containers/podman/v3/libpod/shutdown"
 	"github.com/containers/podman/v3/pkg/cgroups"
 	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v3/pkg/systemd"
 	"github.com/containers/podman/v3/pkg/util"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/unshare"
@@ -500,6 +501,15 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
 	// no containers running.  Create immediately a namespace, as
 	// we will need to access the storage.
 	if needsUserns {
+		// warn users if mode is rootless and cgroup manager is systemd
+		// and no valid systemd session is present
+		// warn only when a new namespace is created
+		if runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager {
+			unified, _ := cgroups.IsCgroup2UnifiedMode()
+			if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) {
+				logrus.Debug("Invalid systemd user session for current user")
+			}
+		}
 		aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
 		pausePid, err := util.GetRootlessPauseProcessPidPathGivenDir(runtime.config.Engine.TmpDir)
 		if err != nil {
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 40df98d7c..d1ea7d4fd 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -255,11 +255,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 	// Set volume as invalid so it can no longer be used
 	v.valid = false

-	// Remove the volume from the state
-	if err := r.state.RemoveVolume(v); err != nil {
-		return errors.Wrapf(err, "error removing volume %s", v.Name())
-	}
-
 	var removalErr error

 	// If we use a volume plugin, we need to remove from the plugin.
@@ -287,11 +282,19 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
 		req := new(pluginapi.RemoveRequest)
 		req.Name = v.Name()
 		if err := v.plugin.RemoveVolume(req); err != nil {
-			removalErr = errors.Wrapf(err, "volume %s could not be removed from plugin %s, but it has been removed from Podman", v.Name(), v.Driver())
+			return errors.Wrapf(err, "volume %s could not be removed from plugin %s", v.Name(), v.Driver())
 		}
 	}

+	// Remove the volume from the state
+	if err := r.state.RemoveVolume(v); err != nil {
+		if removalErr != nil {
+			logrus.Errorf("Error removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
+		}
+		return errors.Wrapf(err, "error removing volume %s", v.Name())
+	}
+
 	// Free the volume's lock
 	if err := v.lock.Free(); err != nil {
 		if removalErr == nil {
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index 62b1655ac..cd118cbb2 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -117,7 +117,7 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, identity string)

 	ctx = context.WithValue(ctx, clientKey, &connection)
 	if err := pingNewConnection(ctx); err != nil {
-		return nil, errors.Wrap(err, "cannot connect to the Podman socket, please verify that Podman REST API service is running")
+		return nil, errors.Wrap(err, "cannot connect to the Podman socket, please verify the connection to the Linux system, or use `podman machine` to create/start a Linux VM.")
 	}
 	return ctx, nil
 }
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index e1aeae244..39e0fc5df 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -481,9 +481,9 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
 			return nil // skip root dir
 		}

-		name := strings.TrimPrefix(path, s+string(filepath.Separator))
+		name := filepath.ToSlash(strings.TrimPrefix(path, s+string(filepath.Separator)))

-		excluded, err := pm.Matches(filepath.ToSlash(name)) // nolint:staticcheck
+		excluded, err := pm.Matches(name) // nolint:staticcheck
 		if err != nil {
 			return errors.Wrapf(err, "error checking if %q is excluded", name)
 		}
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 89dfc08e9..01de73ebe 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -10,6 +10,8 @@ import (
 type PlayKubeOptions struct {
 	// Authfile - path to an authentication file.
 	Authfile string
+	// Indicator to build all images with Containerfile or Dockerfile
+	Build bool
 	// CertDir - to a directory containing TLS certifications and keys.
 	CertDir string
 	// Username for authenticating against the registry.
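The new `Build` field above feeds the image-resolution logic in the `pkg/domain/infra/abi/play.go` hunk that follows. Reduced to a sketch (the helper below is illustrative, not part of the patch), the per-container decision looks like this:

```go
package main

import "fmt"

// shouldBuild mirrors the decision added to playKubePod in
// pkg/domain/infra/abi/play.go: build when a Containerfile/Dockerfile was
// found next to the YAML, and either the image is missing from local storage
// or --build was given.
func shouldBuild(buildFile string, existsLocally, buildFlag bool) bool {
	return len(buildFile) > 0 && (!existsLocally || buildFlag)
}

func main() {
	fmt.Println(shouldBuild("foobar/Containerfile", false, false)) // true: image absent, so build
	fmt.Println(shouldBuild("foobar/Containerfile", true, false))  // false: prebuilt image wins
	fmt.Println(shouldBuild("foobar/Containerfile", true, true))   // true: --build overrides local storage
	fmt.Println(shouldBuild("", false, false))                     // false: nothing to build from
}
```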
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index d257bad18..6224feff5 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -7,9 +7,11 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"

+	buildahDefine "github.com/containers/buildah/define"
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/pkg/config"
 	"github.com/containers/image/v5/types"
@@ -266,39 +268,69 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
 	}

 	containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
+	cwd, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
 	for _, container := range podYAML.Spec.Containers {
 		// Contains all labels obtained from kube
 		labels := make(map[string]string)
-
-		// NOTE: set the pull policy to "newer".  This will cover cases
-		// where the "latest" tag requires a pull and will also
-		// transparently handle "localhost/" prefixed files which *may*
-		// refer to a locally built image OR an image running a
-		// registry on localhost.
-		pullPolicy := config.PullPolicyNewer
-		if len(container.ImagePullPolicy) > 0 {
-			// Make sure to lower the strings since K8s pull policy
-			// may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905).
-			rawPolicy := string(container.ImagePullPolicy)
-			pullPolicy, err = config.ParsePullPolicy(strings.ToLower(rawPolicy))
-			if err != nil {
-				return nil, err
-			}
+		var pulledImage *libimage.Image
+		buildFile, err := getBuildFile(container.Image, cwd)
+		if err != nil {
+			return nil, err
 		}
-		// This ensures the image is the image store
-		pullOptions := &libimage.PullOptions{}
-		pullOptions.AuthFilePath = options.Authfile
-		pullOptions.CertDirPath = options.CertDir
-		pullOptions.SignaturePolicyPath = options.SignaturePolicy
-		pullOptions.Writer = writer
-		pullOptions.Username = options.Username
-		pullOptions.Password = options.Password
-		pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify
-
-		pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, container.Image, pullPolicy, pullOptions)
+		existsLocally, err := ic.Libpod.LibimageRuntime().Exists(container.Image)
 		if err != nil {
 			return nil, err
 		}
+		if len(buildFile) > 0 && (!existsLocally || options.Build) {
+			buildOpts := new(buildahDefine.BuildOptions)
+			commonOpts := new(buildahDefine.CommonBuildOptions)
+			buildOpts.ConfigureNetwork = buildahDefine.NetworkDefault
+			buildOpts.Isolation = buildahDefine.IsolationChroot
+			buildOpts.CommonBuildOpts = commonOpts
+			buildOpts.Output = container.Image
+			if _, _, err := ic.Libpod.Build(ctx, *buildOpts, []string{buildFile}...); err != nil {
+				return nil, err
+			}
+			i, _, err := ic.Libpod.LibimageRuntime().LookupImage(container.Image, new(libimage.LookupImageOptions))
+			if err != nil {
+				return nil, err
+			}
+			pulledImage = i
+		} else {
+			// NOTE: set the pull policy to "newer".  This will cover cases
+			// where the "latest" tag requires a pull and will also
+			// transparently handle "localhost/" prefixed files which *may*
+			// refer to a locally built image OR an image running a
+			// registry on localhost.
+			pullPolicy := config.PullPolicyNewer
+			if len(container.ImagePullPolicy) > 0 {
+				// Make sure to lower the strings since K8s pull policy
+				// may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905).
+				rawPolicy := string(container.ImagePullPolicy)
+				pullPolicy, err = config.ParsePullPolicy(strings.ToLower(rawPolicy))
+				if err != nil {
+					return nil, err
+				}
+			}
+			// This ensures the image is in the image store
+			pullOptions := &libimage.PullOptions{}
+			pullOptions.AuthFilePath = options.Authfile
+			pullOptions.CertDirPath = options.CertDir
+			pullOptions.SignaturePolicyPath = options.SignaturePolicy
+			pullOptions.Writer = writer
+			pullOptions.Username = options.Username
+			pullOptions.Password = options.Password
+			pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify
+
+			pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, container.Image, pullPolicy, pullOptions)
+			if err != nil {
+				return nil, err
+			}
+			pulledImage = pulledImages[0]
+		}

 		// Handle kube annotations
 		for k, v := range annotations {
@@ -318,7 +350,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY

 		specgenOpts := kube.CtrSpecGenOptions{
 			Container: container,
-			Image:     pulledImages[0],
+			Image:     pulledImage,
 			Volumes:   volumes,
 			PodID:     pod.ID(),
 			PodName:   podName,
@@ -509,3 +541,48 @@ func sortKubeKinds(documentList [][]byte) ([][]byte, error) {

 	return sortedDocumentList, nil
 }
+
+func imageNamePrefix(imageName string) string {
+	prefix := imageName
+	s := strings.Split(prefix, ":")
+	if len(s) > 0 {
+		prefix = s[0]
+	}
+	s = strings.Split(prefix, "/")
+	if len(s) > 0 {
+		prefix = s[len(s)-1]
+	}
+	s = strings.Split(prefix, "@")
+	if len(s) > 0 {
+		prefix = s[0]
+	}
+	return prefix
+}
+
+func getBuildFile(imageName string, cwd string) (string, error) {
+	buildDirName := imageNamePrefix(imageName)
+	containerfilePath := filepath.Join(cwd, buildDirName, "Containerfile")
+	dockerfilePath := filepath.Join(cwd, buildDirName, "Dockerfile")
+
+	_, err := os.Stat(containerfilePath)
+	if err == nil {
+		logrus.Debugf("building %s with %s", imageName, containerfilePath)
+		return containerfilePath, nil
+	}
+	// If the error is not because the file does not exist, take
+	// a mulligan and try Dockerfile.  If that also fails, return that
+	// error
+	if err != nil && !os.IsNotExist(err) {
+		logrus.Errorf("%v: unable to check for %s", err, containerfilePath)
+	}
+
+	_, err = os.Stat(dockerfilePath)
+	if err == nil {
+		logrus.Debugf("building %s with %s", imageName, dockerfilePath)
+		return dockerfilePath, nil
+	}
+	// Strike two
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+	return "", err
+}
diff --git a/pkg/rootlessport/rootlessport_linux.go b/pkg/rootlessport/rootlessport_linux.go
index ede216bfe..9a2f93f8e 100644
--- a/pkg/rootlessport/rootlessport_linux.go
+++ b/pkg/rootlessport/rootlessport_linux.go
@@ -20,7 +20,6 @@ import (
 	"net"
 	"os"
 	"os/exec"
-	"os/signal"
 	"path/filepath"

 	"github.com/containernetworking/plugins/pkg/ns"
@@ -106,30 +105,6 @@ func parent() error {
 		return err
 	}

-	exitC := make(chan os.Signal, 1)
-	defer close(exitC)
-
-	go func() {
-		sigC := make(chan os.Signal, 1)
-		signal.Notify(sigC, unix.SIGPIPE)
-		defer func() {
-			signal.Stop(sigC)
-			close(sigC)
-		}()
-
-		select {
-		case s := <-sigC:
-			if s == unix.SIGPIPE {
-				if f, err := os.OpenFile("/dev/null", os.O_WRONLY, 0755); err == nil {
-					unix.Dup2(int(f.Fd()), 1) // nolint:errcheck
-					unix.Dup2(int(f.Fd()), 2) // nolint:errcheck
-					f.Close()
-				}
-			}
-		case <-exitC:
-		}
-	}()
-
 	socketDir := filepath.Join(cfg.TmpDir, "rp")
 	err = os.MkdirAll(socketDir, 0700)
 	if err != nil {
@@ -251,8 +226,16 @@ outer:
 		go serve(socket, driver)
 	}

-	// write and close ReadyFD (convention is same as slirp4netns --ready-fd)
 	logrus.Info("ready")
+
+	// https://github.com/containers/podman/issues/11248
+	// Copy /dev/null to stdout and stderr to prevent SIGPIPE errors
+	if f, err := os.OpenFile("/dev/null", os.O_WRONLY, 0755); err == nil {
+		unix.Dup2(int(f.Fd()), 1) // nolint:errcheck
+		unix.Dup2(int(f.Fd()), 2) // nolint:errcheck
+		f.Close()
+	}
+
+	// write and close ReadyFD (convention is same as slirp4netns --ready-fd)
 	if _, err := readyW.Write([]byte("1")); err != nil {
 		return err
 	}
diff --git a/pkg/systemd/dbus.go b/pkg/systemd/dbus.go
index 718082526..c49f537b6 100644
--- a/pkg/systemd/dbus.go
+++ b/pkg/systemd/dbus.go
@@ -9,8 +9,105 @@ import (
 	"github.com/containers/podman/v3/pkg/rootless"
 	"github.com/coreos/go-systemd/v22/dbus"
 	godbus "github.com/godbus/dbus/v5"
+	"github.com/sirupsen/logrus"
 )

+// IsSystemdSessionValid checks whether the logind session is valid for the provided rootless uid.
+func IsSystemdSessionValid(uid int) bool {
+	var conn *godbus.Conn
+	var err error
+	var object godbus.BusObject
+	var seat0Path godbus.ObjectPath
+	dbusDest := "org.freedesktop.login1"
+	dbusInterface := "org.freedesktop.login1.Manager"
+	dbusPath := "/org/freedesktop/login1"
+
+	if rootless.IsRootless() {
+		conn, err = GetLogindConnection(rootless.GetRootlessUID())
+		if err != nil {
+			// unable to fetch systemd object for logind
+			logrus.Debugf("systemd-logind: %s", err)
+			return false
+		}
+		object = conn.Object(dbusDest, godbus.ObjectPath(dbusPath))
+		if err := object.Call(dbusInterface+".GetSeat", 0, "seat0").Store(&seat0Path); err != nil {
+			// unable to get seat0 path.
+			logrus.Debugf("systemd-logind: %s", err)
+			return false
+		}
+		seat0Obj := conn.Object(dbusDest, seat0Path)
+		activeSession, err := seat0Obj.GetProperty(dbusDest + ".Seat.ActiveSession")
+		if err != nil {
+			// unable to get active sessions.
+			logrus.Debugf("systemd-logind: %s", err)
+			return false
+		}
+		activeSessionMap, ok := activeSession.Value().([]interface{})
+		if !ok || len(activeSessionMap) < 2 {
+			// unable to get active session map.
+			logrus.Debug("systemd-logind: invalid active session map")
+			return false
+		}
+		activeSessionPath, ok := activeSessionMap[1].(godbus.ObjectPath)
+		if !ok {
+			// unable to fetch active session path.
+			logrus.Debug("systemd-logind: invalid active session path")
+			return false
+		}
+		activeSessionObj := conn.Object(dbusDest, activeSessionPath)
+		sessionUser, err := activeSessionObj.GetProperty(dbusDest + ".Session.User")
+		if err != nil {
+			// unable to fetch session user from activeSession path.
+			logrus.Debugf("systemd-logind: %s", err)
+			return false
+		}
+		dbusUser, ok := sessionUser.Value().([]interface{})
+		if !ok {
+			// not a valid user.
+			return false
+		}
+		if len(dbusUser) < 2 {
+			// not a valid session user.
+			return false
+		}
+		activeUID, ok := dbusUser[0].(uint32)
+		if !ok {
+			return false
+		}
+		// an active session was found; check that it belongs to the requested rootless user
+		if activeUID == uint32(uid) {
+			return true
+		}
+		return false
+	}
+	return true
+}
+
+// GetLogindConnection returns a user connection to D-Bus logind.
+func GetLogindConnection(uid int) (*godbus.Conn, error) {
+	return dbusAuthConnectionLogind(uid)
+}
+
+func dbusAuthConnectionLogind(uid int) (*godbus.Conn, error) {
+	var conn *godbus.Conn
+	var err error
+	conn, err = godbus.SystemBusPrivate()
+	if err != nil {
+		return nil, err
+	}
+	methods := []godbus.Auth{godbus.AuthExternal(strconv.Itoa(uid))}
+	if err = conn.Auth(methods); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	err = conn.Hello()
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return conn, nil
+}
+
 func dbusAuthRootlessConnection(createBus func(opts ...godbus.ConnOption) (*godbus.Conn, error)) (*godbus.Conn, error) {
 	conn, err := createBus()
 	if err != nil {
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index 18b3d56f9..44a33b0b8 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -165,14 +165,6 @@ skip "FIXME FIXME FIXME: this passes on Ed's laptop, fails in CI??" \
 skip "buildah runs with --cgroup-manager=cgroupfs, podman with systemd" \
      "bud with --cgroup-parent"

-# see https://github.com/containers/podman/pull/10829
-skip "FIXME FIXME FIXME - requires updated CI images (#10829)" \
-     "bud with --runtime and --runtime-flag"
-
-###############################################################################
-# BEGIN tests which are skipped due to actual podman bugs.
-
-
 ###############################################################################
 # BEGIN tests which are skipped because they make no sense under podman-remote
diff --git a/test/compose/mount_and_label/docker-compose.yml b/test/compose/mount_and_label/docker-compose.yml
index 112d7e134..81fda2512 100644
--- a/test/compose/mount_and_label/docker-compose.yml
+++ b/test/compose/mount_and_label/docker-compose.yml
@@ -6,5 +6,7 @@ services:
       - '5000:5000'
     volumes:
       - /tmp/data:/data:ro
+    security_opt:
+      - label=disable
     labels:
       - "io.podman=the_best"
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 2e48e1763..6b97c4162 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -645,9 +645,13 @@ func isRootless() bool {
 	return os.Geteuid() != 0
 }

+func isCgroupsV1() bool {
+	return !CGROUPSV2
+}
+
 func SkipIfCgroupV1(reason string) {
 	checkReason(reason)
-	if !CGROUPSV2 {
+	if isCgroupsV1() {
 		Skip(reason)
 	}
 }
@@ -841,3 +845,18 @@ func (p *PodmanTestIntegration) buildImage(dockerfile, imageName string, layers
 	output := session.OutputToStringArray()
 	return output[len(output)-1]
 }
+
+func writeYaml(content string, fileName string) error {
+	f, err := os.Create(fileName)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = f.WriteString(content)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 7ad1fc1f2..d8ca9cbd9 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -79,9 +79,9 @@ var _ = Describe("Podman login and logout", func() {
 		session = podmanTest.Podman([]string{"run", "-d", "-p", strings.Join([]string{strconv.Itoa(port), strconv.Itoa(port)}, ":"),
 			"-e", strings.Join([]string{"REGISTRY_HTTP_ADDR=0.0.0.0", strconv.Itoa(port)}, ":"), "--name", "registry", "-v",
-			strings.Join([]string{authPath, "/auth"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
+			strings.Join([]string{authPath, "/auth:Z"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
 			"REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm", "-e", "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd",
-			"-v", strings.Join([]string{certPath, "/certs"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt",
+			"-v", strings.Join([]string{certPath, "/certs:Z"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt",
 			"-e", "REGISTRY_HTTP_TLS_KEY=/certs/domain.key", "registry:2.6"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))
@@ -235,10 +235,13 @@ var _ = Describe("Podman login and logout", func() {
 		setup.WaitWithDefaultTimeout()
 		defer os.RemoveAll(certDir)

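An aside on the `:z`/`:Z` suffixes introduced in these tests (and in the `run_test.go` change further below): both ask Podman to relabel volume content for SELinux, with lowercase `z` applying a shared label that several containers may use and uppercase `Z` a private, single-container label. A sketch of how the test argument is assembled (`volumeArg` is an illustrative helper, not from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// volumeArg joins a host path, a container path, and an SELinux relabel
// option the same way the tests above do with strings.Join.
func volumeArg(hostPath, ctrPath, relabel string) string {
	return strings.Join([]string{hostPath, ctrPath + ":" + relabel}, ":")
}

func main() {
	fmt.Println(volumeArg("/tmp/auth", "/auth", "Z")) // private label: one container only
	fmt.Println(volumeArg("/tmp/auth", "/auth", "z")) // shared label: both registries may mount it
}
```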
+		// N.B.: This second registry container shares the same auth and cert dirs
+		// as the registry started from BeforeEach().  Since this one starts
+		// second, re-labeling the volumes should keep SELinux happy.
 		session := podmanTest.Podman([]string{"run", "-d", "-p", "9001:9001", "-e", "REGISTRY_HTTP_ADDR=0.0.0.0:9001", "--name", "registry1", "-v",
-			strings.Join([]string{authPath, "/auth"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
+			strings.Join([]string{authPath, "/auth:z"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
 			"REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm", "-e", "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd",
-			"-v", strings.Join([]string{certPath, "/certs"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt",
+			"-v", strings.Join([]string{certPath, "/certs:z"}, ":"), "-e", "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt",
 			"-e", "REGISTRY_HTTP_TLS_KEY=/certs/domain.key", "registry:2.6"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))
diff --git a/test/e2e/play_build_test.go b/test/e2e/play_build_test.go
new file mode 100644
index 000000000..16f2687f3
--- /dev/null
+++ b/test/e2e/play_build_test.go
@@ -0,0 +1,243 @@
+// +build !remote
+
+// build for play kube is not supported on remote yet.
+
+package integration
+
+import (
+	"os"
+	"path/filepath"
+
+	. "github.com/containers/podman/v3/test/utils"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	. "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("Podman play kube with build", func() {
+	var (
+		tempdir    string
+		err        error
+		podmanTest *PodmanTestIntegration
+	)
+
+	BeforeEach(func() {
+		tempdir, err = CreateTempDirInTempDir()
+		if err != nil {
+			os.Exit(1)
+		}
+		podmanTest = PodmanTestCreate(tempdir)
+		podmanTest.Setup()
+		podmanTest.SeedImages()
+	})
+
+	AfterEach(func() {
+		podmanTest.Cleanup()
+		f := CurrentGinkgoTestDescription()
+		processTestResult(f)
+
+	})
+
+	var testYAML = `
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: "2021-08-05T17:55:51Z"
+  labels:
+    app: foobar
+  name: top_pod
+spec:
+  containers:
+  - command:
+    - top
+    env:
+    - name: PATH
+      value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    - name: TERM
+      value: xterm
+    - name: container
+      value: podman
+    image: foobar
+    name: foobar
+    resources: {}
+    securityContext:
+      allowPrivilegeEscalation: true
+      capabilities:
+        drop:
+        - CAP_MKNOD
+        - CAP_NET_RAW
+        - CAP_AUDIT_WRITE
+      privileged: false
+      readOnlyRootFilesystem: false
+      seLinuxOptions: {}
+    tty: true
+    workingDir: /
+  dnsConfig: {}
+status: {}
+`
+
+	var playBuildFile = `
+FROM quay.io/libpod/alpine_nginx:latest
+RUN apk update && apk add strace
+LABEL homer=dad
+`
+	var prebuiltImage = `
+FROM quay.io/libpod/alpine_nginx:latest
+RUN apk update && apk add strace
+LABEL marge=mom
+`
+	It("Check that image is built using Dockerfile", func() {
+		// Setup
+		yamlDir := filepath.Join(tempdir, RandomString(12))
+		Expect(os.Mkdir(yamlDir, 0755)).To(BeNil())
+		err := writeYaml(testYAML, filepath.Join(yamlDir, "top.yaml"))
+		Expect(err).To(BeNil())
+		app1Dir := filepath.Join(yamlDir, "foobar")
+		err = os.Mkdir(app1Dir, 0755)
+		Expect(err).To(BeNil())
+		err = writeYaml(playBuildFile, filepath.Join(app1Dir, "Dockerfile"))
+		Expect(err).To(BeNil())
+
+		// Switch to temp dir and restore it afterwards
+		cwd, err := os.Getwd()
+		Expect(err).To(BeNil())
+		Expect(os.Chdir(yamlDir)).To(BeNil())
+		defer func() { Expect(os.Chdir(cwd)).To(BeNil()) }()
+
+		session := podmanTest.Podman([]string{"play", "kube", "top.yaml"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		exists := podmanTest.Podman([]string{"image", "exists", "foobar"})
+		exists.WaitWithDefaultTimeout()
+		Expect(exists).Should(Exit(0))
+
+		inspect := podmanTest.Podman([]string{"container", "inspect", "top_pod-foobar"})
+		inspect.WaitWithDefaultTimeout()
+		Expect(inspect).Should(Exit(0))
+		inspectData := inspect.InspectContainerToJSON()
+		Expect(len(inspectData)).To(BeNumerically(">", 0))
+		Expect(inspectData[0].Config.Labels["homer"]).To(Equal("dad"))
+	})
+
+	It("Check that image is built using Containerfile", func() {
+		// Setup
+		yamlDir := filepath.Join(tempdir, RandomString(12))
+		Expect(os.Mkdir(yamlDir, 0755)).To(BeNil())
+		err := writeYaml(testYAML, filepath.Join(yamlDir, "top.yaml"))
+		Expect(err).To(BeNil())
+		app1Dir := filepath.Join(yamlDir, "foobar")
+		err = os.Mkdir(app1Dir, 0755)
+		Expect(err).To(BeNil())
+		err = writeYaml(playBuildFile, filepath.Join(app1Dir, "Containerfile"))
+		Expect(err).To(BeNil())
+
+		// Switch to temp dir and restore it afterwards
+		cwd, err := os.Getwd()
+		Expect(err).To(BeNil())
+		Expect(os.Chdir(yamlDir)).To(BeNil())
+		defer func() { Expect(os.Chdir(cwd)).To(BeNil()) }()
+
+		session := podmanTest.Podman([]string{"play", "kube", "top.yaml"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		exists := podmanTest.Podman([]string{"image", "exists", "foobar"})
+		exists.WaitWithDefaultTimeout()
+		Expect(exists).Should(Exit(0))
+
+		inspect := podmanTest.Podman([]string{"container", "inspect", "top_pod-foobar"})
+		inspect.WaitWithDefaultTimeout()
+		Expect(inspect).Should(Exit(0))
+		inspectData := inspect.InspectContainerToJSON()
+		Expect(len(inspectData)).To(BeNumerically(">", 0))
+		Expect(inspectData[0].Config.Labels["homer"]).To(Equal("dad"))
+	})
+
+	It("Do not build image if already in the local store", func() {
+		// Setup
+		yamlDir := filepath.Join(tempdir, RandomString(12))
+		Expect(os.Mkdir(yamlDir, 0755)).To(BeNil())
+		err := writeYaml(testYAML, filepath.Join(yamlDir, "top.yaml"))
+		Expect(err).To(BeNil())
+
+		// build an image called foobar but make sure it doesn't have
+		// the same label as the yaml buildfile, so we can check that
+		// the image is NOT rebuilt.
+		err = writeYaml(prebuiltImage, filepath.Join(yamlDir, "Containerfile"))
+		Expect(err).To(BeNil())
+
+		app1Dir := filepath.Join(yamlDir, "foobar")
+		err = os.Mkdir(app1Dir, 0755)
+		Expect(err).To(BeNil())
+		err = writeYaml(playBuildFile, filepath.Join(app1Dir, "Containerfile"))
+		Expect(err).To(BeNil())
+
+		// Switch to temp dir and restore it afterwards
+		cwd, err := os.Getwd()
+		Expect(err).To(BeNil())
+		Expect(os.Chdir(yamlDir)).To(BeNil())
+		defer func() { Expect(os.Chdir(cwd)).To(BeNil()) }()
+
+		// Build the image into the local store
+		build := podmanTest.Podman([]string{"build", "-t", "foobar", "-f", "Containerfile"})
+		build.WaitWithDefaultTimeout()
+		Expect(build).Should(Exit(0))
+
+		session := podmanTest.Podman([]string{"play", "kube", "top.yaml"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		inspect := podmanTest.Podman([]string{"container", "inspect", "top_pod-foobar"})
+		inspect.WaitWithDefaultTimeout()
+		Expect(inspect).Should(Exit(0))
+		inspectData := inspect.InspectContainerToJSON()
+		Expect(len(inspectData)).To(BeNumerically(">", 0))
+		Expect(inspectData[0].Config.Labels["homer"]).To(Equal(""))
+		Expect(inspectData[0].Config.Labels["marge"]).To(Equal("mom"))
+	})
+
+	It("--build should override image in store", func() {
+		// Setup
+		yamlDir := filepath.Join(tempdir, RandomString(12))
+		Expect(os.Mkdir(yamlDir, 0755)).To(BeNil())
+		err := writeYaml(testYAML, filepath.Join(yamlDir, "top.yaml"))
+		Expect(err).To(BeNil())
+
+		// build an image called foobar but make sure it doesn't have
+		// the same label as the yaml buildfile, so we can check that
+		// the image IS rebuilt.
+		err = writeYaml(prebuiltImage, filepath.Join(yamlDir, "Containerfile"))
+		Expect(err).To(BeNil())
+
+		app1Dir := filepath.Join(yamlDir, "foobar")
+		err = os.Mkdir(app1Dir, 0755)
+		Expect(err).To(BeNil())
+		err = writeYaml(playBuildFile, filepath.Join(app1Dir, "Containerfile"))
+		Expect(err).To(BeNil())
+
+		// Switch to temp dir and restore it afterwards
+		cwd, err := os.Getwd()
+		Expect(err).To(BeNil())
+		Expect(os.Chdir(yamlDir)).To(BeNil())
+		defer func() { Expect(os.Chdir(cwd)).To(BeNil()) }()
+
+		// Build the image into the local store
+		build := podmanTest.Podman([]string{"build", "-t", "foobar", "-f", "Containerfile"})
+		build.WaitWithDefaultTimeout()
+		Expect(build).Should(Exit(0))
+
+		session := podmanTest.Podman([]string{"play", "kube", "--build", "top.yaml"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		inspect := podmanTest.Podman([]string{"container", "inspect", "top_pod-foobar"})
+		inspect.WaitWithDefaultTimeout()
+		Expect(inspect).Should(Exit(0))
+		inspectData := inspect.InspectContainerToJSON()
+		Expect(len(inspectData)).To(BeNumerically(">", 0))
+		Expect(inspectData[0].Config.Labels["homer"]).To(Equal("dad"))
+		Expect(inspectData[0].Config.Labels["marge"]).To(Equal(""))
+	})
+
+})
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index e3096d932..eec4b43a5 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -512,21 +512,6 @@ var (
 	defaultSecret = []byte(`{"FOO":"Zm9v","BAR":"YmFy"}`)
 )

-func writeYaml(content string, fileName string) error {
-	f, err := os.Create(fileName)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	_, err = f.WriteString(content)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // getKubeYaml returns a kubernetes YAML document.
 func getKubeYaml(kind string, object interface{}) (string, error) {
 	var yamlTemplate string
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index d68aa6ac4..1fb1a179a 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -946,7 +946,7 @@ USER mail`, BB)
 		Expect(err).To(BeNil())

 		mountpoint := "/myvol/"
-		session := podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint, ALPINE, "cat", mountpoint + filename})
+		session := podmanTest.Podman([]string{"create", "--volume", vol + ":" + mountpoint + ":z", ALPINE, "cat", mountpoint + filename})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))
 		ctrID := session.OutputToString()
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index a0be5d462..c0d56fdbc 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -22,6 +22,9 @@ var _ = Describe("Podman stats", func() {

 	BeforeEach(func() {
 		SkipIfRootlessCgroupsV1("stats not supported on cgroupv1 for rootless users")
+		if isContainerized() {
+			SkipIfCgroupV1("stats not supported inside cgroupv1 container environment")
+		}
 		var err error
 		tempdir, err = CreateTempDirInTempDir()
 		if err != nil {
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index bb51d6ac2..3213a839a 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -6,7 +6,6 @@ import (
 	"strings"
 	"time"

-	"github.com/containers/podman/v3/pkg/rootless"
 	. "github.com/containers/podman/v3/test/utils"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -118,11 +117,13 @@ WantedBy=multi-user.target
 		Expect(len(conData)).To(Equal(1))
 		Expect(conData[0].Config.SystemdMode).To(BeTrue())

-		if CGROUPSV2 || !rootless.IsRootless() {
-			stats := podmanTest.Podman([]string{"stats", "--no-stream", ctrName})
-			stats.WaitWithDefaultTimeout()
-			Expect(stats).Should(Exit(0))
+		// stats not supported w/ CGv1 rootless or containerized
+		if isCgroupsV1() && (isRootless() || isContainerized()) {
+			return
 		}
+		stats := podmanTest.Podman([]string{"stats", "--no-stream", ctrName})
+		stats.WaitWithDefaultTimeout()
+		Expect(stats).Should(Exit(0))
 	})

 	It("podman create container with systemd entrypoint triggers systemd mode", func() {
diff --git a/troubleshooting.md b/troubleshooting.md
index 24dcb8e35..dcf1d8715 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -746,3 +746,138 @@ an Infra container image for CNI-in-slirp4netns must be created.
 The instructions for building the Infra container image can be found
 for v2.2.1 [here](https://github.com/containers/podman/tree/v2.2.1-rhel/contrib/rootless-cni-infra),
 and for v3.0.1 [here](https://github.com/containers/podman/tree/v3.0.1-rhel/contrib/rootless-cni-infra).
+### 29) Container-related firewall rules are lost after reloading firewalld
+Container networks can't be reached after `firewall-cmd --reload` or `systemctl restart firewalld`. Running `podman network reload` fixes this, but it has to be done manually.
+
+#### Symptom
+The firewall rules created by podman are lost when the firewall is reloaded.
+
+#### Solution
+[@ranjithrajaram](https://github.com/containers/podman/issues/5431#issuecomment-847758377) has created a systemd hook to fix this issue.
+
+1) For "firewall-cmd --reload", create a systemd unit file with the following:
+```
+[Unit]
+Description=firewalld reload hook - run a hook script on firewalld reload
+Wants=dbus.service
+After=dbus.service
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c '/bin/busctl monitor --system --match "interface=org.fedoraproject.FirewallD1,member=Reloaded" --match "interface=org.fedoraproject.FirewallD1,member=PropertiesChanged" | while read -r line ; do podman network reload --all ; done'
+
+[Install]
+WantedBy=multi-user.target
+```
+2) For "systemctl restart firewalld", create a systemd unit file with the following:
+```
+[Unit]
+Description=podman network reload
+Wants=firewalld.service
+After=firewalld.service
+PartOf=firewalld.service
+
+[Service]
+Type=simple
+RemainAfterExit=yes
+ExecStart=/usr/bin/podman network reload --all
+
+[Install]
+WantedBy=multi-user.target
+```
+However, if you use `busctl monitor` you can't get machine-readable output on RHEL 8,
+since it doesn't have `busctl -j`, as mentioned by [@yrro](https://github.com/containers/podman/issues/5431#issuecomment-896943018).
+
+For RHEL 8, you can use the following unit file, which wraps the logic in a bash one-liner.
+```
+[Unit]
+Description=Redo podman NAT rules after firewalld starts or reloads
+Wants=dbus.service
+After=dbus.service
+Requires=firewalld.service
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c "dbus-monitor --profile --system 'type=signal,sender=org.freedesktop.DBus,path=/org/freedesktop/DBus,interface=org.freedesktop.DBus,member=NameAcquired,arg0=org.fedoraproject.FirewallD1' 'type=signal,path=/org/fedoraproject/FirewallD1,interface=org.fedoraproject.FirewallD1,member=Reloaded' | sed -u '/^#/d' | while read -r type timestamp serial sender destination path interface member _junk; do if [[ $type = '#'* ]]; then continue; elif [[ $interface = org.freedesktop.DBus && $member = NameAcquired ]]; then echo 'firewalld started'; podman network reload --all; elif [[ $interface = org.fedoraproject.FirewallD1 && $member = Reloaded ]]; then echo 'firewalld reloaded'; podman network reload --all; fi; done"
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+```
+`busctl monitor` is almost usable on RHEL 8, except that it always outputs two bogus events when it starts up,
+one of which is (in its only machine-readable format) indistinguishable from the `NameOwnerChanged` that you get when firewalld starts up.
+This means you would get an extra `podman network reload --all` when this unit starts.
+
+Apart from this, you can use the following systemd service with the Python 3 code below.
+
+```
+[Unit]
+Description=Redo podman NAT rules after firewalld starts or reloads
+Wants=dbus.service
+Requires=firewalld.service
+After=dbus.service
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/python /path/to/python/code/podman-redo-nat.py
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+```
+The code reloads the podman network twice when you use `systemctl restart firewalld`.
+```
+import dbus
+from gi.repository import GLib
+from dbus.mainloop.glib import DBusGMainLoop
+import subprocess
+import sys
+
+# Note: the return values in this code are not consumed by the callers;
+# they are informational only.
+
+def reload_podman_network():
+    try:
+        subprocess.run(["podman","network","reload","--all"],timeout=90)
+        # Log the successful reload so it shows up in the journal
+        sys.stdout.write("podman network reload done\n")
+        sys.stdout.flush()
+    except subprocess.TimeoutExpired as t:
+        sys.stderr.write(f"Podman reload failed due to Timeout {t}")
+    except subprocess.CalledProcessError as e:
+        sys.stderr.write(f"Podman reload failed due to {e}")
+    except Exception as e:
+        sys.stderr.write(f"Podman reload failed with an Unhandled Exception {e}")
+
+    return False
+
+def signal_handler(*args, **kwargs):
+    if kwargs.get('member') == "Reloaded":
+        reload_podman_network()
+    elif kwargs.get('member') == "NameOwnerChanged":
+        reload_podman_network()
+    else:
+        return None
+    return None
+
+def signal_listener():
+    try:
+        DBusGMainLoop(set_as_default=True) # Define the loop.
+        loop = GLib.MainLoop()
+        system_bus = dbus.SystemBus()
+        # Listens for systemctl restart firewalld; with this filter, podman network gets reloaded twice
+        system_bus.add_signal_receiver(signal_handler,dbus_interface='org.freedesktop.DBus',arg0='org.fedoraproject.FirewallD1',member_keyword='member')
+        # Listens for firewall-cmd --reload
+        system_bus.add_signal_receiver(signal_handler,dbus_interface='org.fedoraproject.FirewallD1',signal_name='Reloaded',member_keyword='member')
+        loop.run()
+    except KeyboardInterrupt:
+        loop.quit()
+        sys.exit(0)
+    except Exception as e:
+        loop.quit()
+        sys.stderr.write(f"Error occurred {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    signal_listener()
+```
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c4c16beed..d791c9cfc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -557,7 +557,7 @@ github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
 # github.com/rivo/uniseg v0.2.0
 github.com/rivo/uniseg
-# github.com/rootless-containers/rootlesskit v0.14.4
+# github.com/rootless-containers/rootlesskit v0.14.5
 github.com/rootless-containers/rootlesskit/pkg/api
 github.com/rootless-containers/rootlesskit/pkg/msgutil
 github.com/rootless-containers/rootlesskit/pkg/port
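A closing note on the play-kube build support: `imageNamePrefix` in `pkg/domain/infra/abi/play.go` strips the tag, the registry path, and any digest so the bare image name can be matched against a build directory. A sketch of the resulting mapping, with the function copied in spirit from the hunk above (the example names are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// imageNamePrefix reduces an image reference to its bare name: drop the tag,
// keep the last path component, then drop any digest.
func imageNamePrefix(imageName string) string {
	prefix := strings.Split(imageName, ":")[0]
	parts := strings.Split(prefix, "/")
	prefix = parts[len(parts)-1]
	return strings.Split(prefix, "@")[0]
}

func main() {
	fmt.Println(imageNamePrefix("foobar"))                       // "foobar"
	fmt.Println(imageNamePrefix("quay.io/libpod/foobar:latest")) // "foobar"
	fmt.Println(imageNamePrefix("foobar@sha256:0123abcd"))       // "foobar"
}
```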