-rw-r--r--  cmd/podman/images/search.go               |  12
-rw-r--r--  completions/bash/podman                   |   1
-rw-r--r--  docs/source/Introduction.rst              |   4
-rw-r--r--  docs/source/markdown/podman-search.1.md   |  18
-rw-r--r--  libpod/image/search.go                    |  51
-rw-r--r--  libpod/networking_linux.go                |  11
-rw-r--r--  pkg/api/handlers/compat/containers.go     |   3
-rw-r--r--  pkg/api/handlers/compat/events.go         |   3
-rw-r--r--  pkg/api/handlers/libpod/images.go         |   7
-rw-r--r--  pkg/api/server/register_images.go         |   4
-rw-r--r--  pkg/bindings/images/images.go             |   1
-rw-r--r--  pkg/domain/entities/images.go             |   4
-rw-r--r--  pkg/domain/infra/abi/images.go            |   2
-rw-r--r--  pkg/spec/config_linux_cgo.go              |   2
-rw-r--r--  test/apiv2/12-imagesMore.at               |  44
-rw-r--r--  test/e2e/config.go                        |   1
-rw-r--r--  test/e2e/network_test.go                  |  37
-rw-r--r--  test/e2e/search_test.go                   |  20
-rw-r--r--  test/e2e/toolbox_test.go                  | 368
-rw-r--r--  test/system/010-images.bats               |  52
-rw-r--r--  test/system/060-mount.bats                |  13
-rw-r--r--  test/system/130-kill.bats                 |  20
-rw-r--r--  test/system/410-selinux.bats              | 108
-rw-r--r--  test/system/helpers.bash                  |  11
24 files changed, 765 insertions(+), 32 deletions(-)
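
The headline change in this series is the new `podman search --list-tags` mode, threaded from the CLI through `libpod/image`, the libpod REST API, and the Go bindings (see the `pkg/bindings/images` and `pkg/domain/entities` hunks below). As orientation only, here is a minimal client-side sketch of driving the new option through the bindings; the socket URI and the exact return type of `images.Search` are assumptions based on the diff, not part of this change set.

```go
// Illustrative sketch only: exercising the new ListTags option through the
// podman v2 Go bindings. Assumes a podman system service is listening on the
// rootless socket under $XDG_RUNTIME_DIR (adjust the URI for your setup).
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/images"
	"github.com/containers/podman/v2/pkg/domain/entities"
)

func main() {
	uri := fmt.Sprintf("unix://%s/podman/podman.sock", os.Getenv("XDG_RUNTIME_DIR"))
	conn, err := bindings.NewConnection(context.Background(), uri)
	if err != nil {
		log.Fatal(err)
	}

	// ListTags maps to the new listTags query parameter; the report's Tag
	// field is only populated in this mode.
	reports, err := images.Search(conn, "registry.fedoraproject.org/f32/fedora-toolbox",
		entities.ImageSearchOptions{ListTags: true, Limit: 10})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range reports {
		fmt.Printf("%s\t%s\n", r.Name, r.Tag)
	}
}
```
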
diff --git a/cmd/podman/images/search.go b/cmd/podman/images/search.go index b8f590585..8edd776ce 100644 --- a/cmd/podman/images/search.go +++ b/cmd/podman/images/search.go @@ -85,6 +85,7 @@ func searchFlags(flags *pflag.FlagSet) { flags.BoolVar(&searchOptions.NoTrunc, "no-trunc", false, "Do not truncate the output") flags.StringVar(&searchOptions.Authfile, "authfile", auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") flags.BoolVar(&searchOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries") + flags.BoolVar(&searchOptions.ListTags, "list-tags", false, "List the tags of the input registry") } // imageSearch implements the command for searching images. @@ -101,6 +102,10 @@ func imageSearch(cmd *cobra.Command, args []string) error { return errors.Errorf("Limit %d is outside the range of [1, 100]", searchOptions.Limit) } + if searchOptions.ListTags && len(searchOptions.Filters) != 0 { + return errors.Errorf("filters are not applicable to list tags result") + } + // TLS verification in c/image is controlled via a `types.OptionalBool` // which allows for distinguishing among set-true, set-false, unspecified // which is important to implement a sane way of dealing with defaults of @@ -119,12 +124,19 @@ func imageSearch(cmd *cobra.Command, args []string) error { if err != nil { return err } + if len(searchReport) == 0 { return nil } hdrs := report.Headers(entities.ImageSearchReport{}, nil) row := "{{.Index}}\t{{.Name}}\t{{.Description}}\t{{.Stars}}\t{{.Official}}\t{{.Automated}}\n" + if searchOptions.ListTags { + if len(searchOptions.Filters) != 0 { + return errors.Errorf("filters are not applicable to list tags result") + } + row = "{{.Name}}\t{{.Tag}}\n" + } if cmd.Flags().Changed("format") { row = report.NormalizeFormat(searchOptions.Format) } diff --git a/completions/bash/podman b/completions/bash/podman index e12862126..564d35f67 100644 --- a/completions/bash/podman +++ b/completions/bash/podman @@ -2024,6 +2024,7 @@ _podman_search() { --help -h --no-trunc + --list-tags " _complete_ "$options_with_args" "$boolean_options" } diff --git a/docs/source/Introduction.rst b/docs/source/Introduction.rst index a1f9d605e..9dcae8a83 100644 --- a/docs/source/Introduction.rst +++ b/docs/source/Introduction.rst @@ -100,7 +100,7 @@ To summarize, Podman makes it easy to find, run, build and share containers. * Find: whether finding a container on dockerhub.io or quay.io, an internal registry server, or directly from a vendor, a couple of `podman search`_, and `podman pull`_ commands make it easy * Run: it's easy to consume pre-built images with everything needed to run an entire application, or start from a Linux distribution base image with the `podman run`_ command -* Build: creating new layers with small tweaks, or major overhauls is easy with `podman build` -* Share: Podman let’s you push your newly built containers anywhere you want with a single `podman push`_ command +* Build: creating new layers with small tweaks, or major overhauls is easy with `podman build`_ +* Share: Podman lets you push your newly built containers anywhere you want with a single `podman push`_ command For more instructions on use cases, take a look at our :doc:`Tutorials` page. 
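
The tag listing itself is built on containers/image's `docker.GetRepositoryTags`, wrapped by the new `searchRepositoryTags` helper in the `libpod/image/search.go` hunk below. A stripped-down standalone sketch of that call, assuming the same c/image v5 API the diff uses (the repository name is only an example):

```go
// Minimal sketch of the approach behind searchRepositoryTags: parse a fully
// specified repository into a docker:// reference and ask the registry for
// its tags. Limit handling and registries.conf wiring are omitted.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	// --list-tags requires the search term to be a fully specified image name.
	ref, err := alltransports.ParseImageName("docker://registry.fedoraproject.org/f32/fedora-toolbox")
	if err != nil {
		log.Fatal(err)
	}

	// A zero SystemContext falls back to the default TLS and registries configuration.
	tags, err := docker.GetRepositoryTags(context.Background(), &types.SystemContext{}, ref)
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range tags {
		fmt.Printf("%s\t%s\n", ref.DockerReference().Name(), tag)
	}
}
```
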
diff --git a/docs/source/markdown/podman-search.1.md b/docs/source/markdown/podman-search.1.md index 2c2a8f012..fc09d96ea 100644 --- a/docs/source/markdown/podman-search.1.md +++ b/docs/source/markdown/podman-search.1.md @@ -56,6 +56,9 @@ Valid placeholders for the Go template are listed below: | .Stars | Star count of image | | .Official | "[OK]" if image is official | | .Automated | "[OK]" if image is automated | +| .Tag | Repository tag | + +Note: use .Tag only if the --list-tags is set. **--limit**=*limit* @@ -65,6 +68,12 @@ Example if limit is 10 and two registries are being searched, the total number of results will be 20, 10 from each (if there are at least 10 matches in each). The order of the search results is the order in which the API endpoint returns the results. +**--list-tags** + +List the available tags in the repository for the specified image. +**Note:** --list-tags requires the search term to be a fully specified image name. +The result contains the Image name and its tag, one line for every tag associated with the image. + **--no-trunc** Do not truncate the output @@ -140,6 +149,15 @@ fedoraproject.org registry.fedoraproject.org/f25/kubernetes-proxy fedoraproject.org registry.fedoraproject.org/f25/kubernetes-scheduler 0 fedoraproject.org registry.fedoraproject.org/f25/mariadb 0 ``` + +``` +$ podman search --list-tags registry.redhat.io/rhel +NAME TAG +registry.redhat.io/rhel 7.3-74 +registry.redhat.io/rhel 7.6-301 +registry.redhat.io/rhel 7.1-9 +... +``` Note: This works only with registries that implement the v2 API. If tried with a v1 registry an error will be returned. ## FILES diff --git a/libpod/image/search.go b/libpod/image/search.go index 6bcc6d3f8..5f5845989 100644 --- a/libpod/image/search.go +++ b/libpod/image/search.go @@ -2,11 +2,13 @@ package image import ( "context" + "fmt" "strconv" "strings" "sync" "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" sysreg "github.com/containers/podman/v2/pkg/registries" "github.com/pkg/errors" @@ -34,6 +36,8 @@ type SearchResult struct { Official string // Automated indicates if the image was created by an automated build. Automated string + // Tag is the image tag + Tag string } // SearchOptions are used to control the behaviour of SearchImages. @@ -49,6 +53,8 @@ type SearchOptions struct { Authfile string // InsecureSkipTLSVerify allows to skip TLS verification. InsecureSkipTLSVerify types.OptionalBool + // ListTags returns the search result with available tags + ListTags bool } // SearchFilter allows filtering the results of SearchImages. @@ -147,6 +153,15 @@ func searchImageInRegistry(term string, registry string, options SearchOptions) // every types.SystemContext, and to compute the value just once in one // place. 
sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath() + if options.ListTags { + results, err := searchRepositoryTags(registry, term, sc, options) + if err != nil { + logrus.Errorf("error listing registry tags %q: %v", registry, err) + return []SearchResult{} + } + return results + } + results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit) if err != nil { logrus.Errorf("error searching registry %q: %v", registry, err) @@ -207,6 +222,42 @@ func searchImageInRegistry(term string, registry string, options SearchOptions) return paramsArr } +func searchRepositoryTags(registry, term string, sc *types.SystemContext, options SearchOptions) ([]SearchResult, error) { + dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name()) + imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term)) + if err == nil && imageRef.Transport().Name() != docker.Transport.Name() { + return nil, errors.Errorf("reference %q must be a docker reference", term) + } else if err != nil { + imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term))) + if err != nil { + return nil, errors.Errorf("reference %q must be a docker reference", term) + } + } + tags, err := docker.GetRepositoryTags(context.TODO(), sc, imageRef) + if err != nil { + return nil, errors.Errorf("error getting repository tags: %v", err) + } + limit := maxQueries + if len(tags) < limit { + limit = len(tags) + } + if options.Limit != 0 { + limit = len(tags) + if options.Limit < limit { + limit = options.Limit + } + } + paramsArr := []SearchResult{} + for i := 0; i < limit; i++ { + params := SearchResult{ + Name: imageRef.DockerReference().Name(), + Tag: tags[i], + } + paramsArr = append(paramsArr, params) + } + return paramsArr, nil +} + // ParseSearchFilter turns the filter into a SearchFilter that can be used for // searching images. func ParseSearchFilter(filter []string) (*SearchFilter, error) { diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index d16bdc973..f87c311ce 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -828,6 +828,17 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e // We can't do more if the network is down. if c.state.NetNS == nil { + // We still want to make dummy configurations for each CNI net + // the container joined. 
+ if len(c.config.Networks) > 0 { + settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(c.config.Networks)) + for _, net := range c.config.Networks { + cniNet := new(define.InspectAdditionalNetwork) + cniNet.NetworkID = net + settings.Networks[net] = cniNet + } + } + return settings, nil } diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go index 48ecfff5d..cae8f88fd 100644 --- a/pkg/api/handlers/compat/containers.go +++ b/pkg/api/handlers/compat/containers.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" "syscall" + "time" "github.com/containers/podman/v2/libpod" "github.com/containers/podman/v2/libpod/define" @@ -316,7 +317,7 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON, cb := types.ContainerJSONBase{ ID: l.ID(), - Created: l.CreatedTime().String(), + Created: l.CreatedTime().Format(time.RFC3339Nano), Path: "", Args: nil, State: &state, diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go index 9efdd1261..a729b84d4 100644 --- a/pkg/api/handlers/compat/events.go +++ b/pkg/api/handlers/compat/events.go @@ -139,7 +139,8 @@ func GetEvents(w http.ResponseWriter, r *http.Request) { if flusher, ok := w.(http.Flusher); ok { flusher.Flush() } + case <-r.Context().Done(): + return } - } } diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go index 43123c5a3..1292090fb 100644 --- a/pkg/api/handlers/libpod/images.go +++ b/pkg/api/handlers/libpod/images.go @@ -608,6 +608,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) { NoTrunc bool `json:"noTrunc"` Filters []string `json:"filters"` TLSVerify bool `json:"tlsVerify"` + ListTags bool `json:"listTags"` }{ // This is where you can override the golang default value for one of fields } @@ -618,8 +619,9 @@ func SearchImages(w http.ResponseWriter, r *http.Request) { } options := image.SearchOptions{ - Limit: query.Limit, - NoTrunc: query.NoTrunc, + Limit: query.Limit, + NoTrunc: query.NoTrunc, + ListTags: query.ListTags, } if _, found := r.URL.Query()["tlsVerify"]; found { options.InsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify) @@ -650,6 +652,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) { reports[i].Stars = searchResults[i].Stars reports[i].Official = searchResults[i].Official reports[i].Automated = searchResults[i].Automated + reports[i].Tag = searchResults[i].Tag } utils.WriteResponse(w, http.StatusOK, reports) diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index ad779203d..c2423218a 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -169,6 +169,10 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // - `is-automated=(true|false)` // - `is-official=(true|false)` // - `stars=<number>` Matches images that has at least 'number' stars. 
+ // - in: query + // name: listTags + // type: boolean + // description: list the available tags in the repository // produces: // - application/json // responses: diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go index a78e7f4c6..2d3035d8d 100644 --- a/pkg/bindings/images/images.go +++ b/pkg/bindings/images/images.go @@ -314,6 +314,7 @@ func Search(ctx context.Context, term string, opts entities.ImageSearchOptions) params.Set("term", term) params.Set("limit", strconv.Itoa(opts.Limit)) params.Set("noTrunc", strconv.FormatBool(opts.NoTrunc)) + params.Set("listTags", strconv.FormatBool(opts.ListTags)) for _, f := range opts.Filters { params.Set("filters", f) } diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index ac81c282d..982fa0cc0 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -214,6 +214,8 @@ type ImageSearchOptions struct { NoTrunc bool // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify types.OptionalBool + // ListTags search the available tags of the repository + ListTags bool } // ImageSearchReport is the response from searching images. @@ -230,6 +232,8 @@ type ImageSearchReport struct { Official string // Automated indicates if the image was created by an automated build. Automated string + // Tag is the repository tag + Tag string } // Image List Options diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 3bb7de83c..f9d733c63 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -511,6 +511,7 @@ func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.Im Limit: opts.Limit, NoTrunc: opts.NoTrunc, InsecureSkipTLSVerify: opts.SkipTLSVerify, + ListTags: opts.ListTags, } searchResults, err := image.SearchImages(term, searchOpts) @@ -529,6 +530,7 @@ func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.Im reports[i].Stars = searchResults[i].Stars reports[i].Official = searchResults[i].Official reports[i].Automated = searchResults[i].Automated + reports[i].Tag = searchResults[i].Tag } return reports, nil diff --git a/pkg/spec/config_linux_cgo.go b/pkg/spec/config_linux_cgo.go index bc8fc4e29..d0891b574 100644 --- a/pkg/spec/config_linux_cgo.go +++ b/pkg/spec/config_linux_cgo.go @@ -39,7 +39,7 @@ func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.Linu logrus.Debug("Loading default seccomp profile") seccompConfig, err = goSeccomp.GetDefaultProfile(configSpec) if err != nil { - return nil, errors.Wrapf(err, "loading seccomp profile (%s) failed", config.SeccompProfilePath) + return nil, errors.Wrapf(err, "loading default seccomp profile failed") } } diff --git a/test/apiv2/12-imagesMore.at b/test/apiv2/12-imagesMore.at new file mode 100644 index 000000000..30ccf0cfc --- /dev/null +++ b/test/apiv2/12-imagesMore.at @@ -0,0 +1,44 @@ +# -*- sh -*- +# +# Tests for more image-related endpoints +# + +podman pull -q $IMAGE + +t GET libpod/images/json 200 \ + .[0].Id~[0-9a-f]\\{64\\} +iid=$(jq -r '.[0].Id' <<<"$output") + +# Retrieve the image tree +t GET libpod/images/$IMAGE/tree 200 \ + .Tree~^Image + +# Tag nonesuch image +t POST "libpod/images/nonesuch/tag?repo=myrepo&tag=mytag" '' 404 + +# Tag the image +t POST "libpod/images/$IMAGE/tag?repo=localhost:5000/myrepo&tag=mytag" '' 201 + +t GET libpod/images/$IMAGE/json 200 \ + .RepoTags[1]=localhost:5000/myrepo:mytag + +# Run registry container +podman run -d --name registry -p 5000:5000 
docker.io/library/registry:2.6 /entrypoint.sh /etc/docker/registry/config.yml + +# Push to local registry +t POST libpod/images/localhost:5000/myrepo:mytag/push\?tlsVerify\=false '' 200 + +# Untag the image +t POST "libpod/images/$iid/untag?repo=localhost:5000/myrepo&tag=mytag" '' 201 + +t GET libpod/images/$IMAGE/json 200 \ + .RepoTags[-1]=$IMAGE + +# Remove the registry container +t DELETE libpod/containers/registry?force=true 204 + +# Remove images +t DELETE libpod/images/$IMAGE 200 \ + .ExitCode=0 +t DELETE libpod/images/docker.io/library/registry:2.6 200 \ + .ExitCode=0 diff --git a/test/e2e/config.go b/test/e2e/config.go index 49a47c7da..54e39f9d2 100644 --- a/test/e2e/config.go +++ b/test/e2e/config.go @@ -14,6 +14,7 @@ var ( BB = "docker.io/library/busybox:latest" healthcheck = "docker.io/libpod/alpine_healthcheck:latest" ImageCacheDir = "/tmp/podman/imagecachedir" + fedoraToolbox = "registry.fedoraproject.org/f32/fedora-toolbox:latest" // This image has seccomp profiles that blocks all syscalls. // The intention behind blocking all syscalls is to prevent diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go index cbfd72da6..9bd16c008 100644 --- a/test/e2e/network_test.go +++ b/test/e2e/network_test.go @@ -211,6 +211,43 @@ var _ = Describe("Podman network", func() { Expect(rmAll.ExitCode()).To(BeZero()) }) + It("podman inspect container two CNI networks (container not running)", func() { + netName1 := "testNetThreeCNI1" + network1 := podmanTest.Podman([]string{"network", "create", netName1}) + network1.WaitWithDefaultTimeout() + Expect(network1.ExitCode()).To(BeZero()) + defer podmanTest.removeCNINetwork(netName1) + + netName2 := "testNetThreeCNI2" + network2 := podmanTest.Podman([]string{"network", "create", netName2}) + network2.WaitWithDefaultTimeout() + Expect(network2.ExitCode()).To(BeZero()) + defer podmanTest.removeCNINetwork(netName2) + + ctrName := "testCtr" + container := podmanTest.Podman([]string{"create", "--network", fmt.Sprintf("%s,%s", netName1, netName2), "--name", ctrName, ALPINE, "top"}) + container.WaitWithDefaultTimeout() + Expect(container.ExitCode()).To(BeZero()) + + inspect := podmanTest.Podman([]string{"inspect", ctrName}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(BeZero()) + conData := inspect.InspectContainerToJSON() + Expect(len(conData)).To(Equal(1)) + Expect(len(conData[0].NetworkSettings.Networks)).To(Equal(2)) + net1, ok := conData[0].NetworkSettings.Networks[netName1] + Expect(ok).To(BeTrue()) + Expect(net1.NetworkID).To(Equal(netName1)) + net2, ok := conData[0].NetworkSettings.Networks[netName2] + Expect(ok).To(BeTrue()) + Expect(net2.NetworkID).To(Equal(netName2)) + + // Necessary to ensure the CNI network is removed cleanly + rmAll := podmanTest.Podman([]string{"rm", "-f", ctrName}) + rmAll.WaitWithDefaultTimeout() + Expect(rmAll.ExitCode()).To(BeZero()) + }) + It("podman inspect container two CNI networks", func() { netName1 := "testNetTwoCNI1" network1 := podmanTest.Podman([]string{"network", "create", "--subnet", "10.50.51.0/25", netName1}) diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go index 043da9059..0cf005529 100644 --- a/test/e2e/search_test.go +++ b/test/e2e/search_test.go @@ -423,4 +423,24 @@ registries = ['{{.Host}}:{{.Port}}']` Expect(search.ExitCode()).To(Equal(0)) Expect(len(search.OutputToStringArray()) > 1).To(BeTrue()) }) + + It("podman search repository tags", func() { + search := podmanTest.Podman([]string{"search", "--list-tags", "--limit", "30", "docker.io/library/alpine"}) 
+ search.WaitWithDefaultTimeout() + Expect(search.ExitCode()).To(Equal(0)) + Expect(len(search.OutputToStringArray())).To(Equal(31)) + + search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/alpine"}) + search.WaitWithDefaultTimeout() + Expect(search.ExitCode()).To(Equal(0)) + Expect(len(search.OutputToStringArray()) > 2).To(BeTrue()) + + search = podmanTest.Podman([]string{"search", "--filter=is-official", "--list-tags", "docker.io/library/alpine"}) + search.WaitWithDefaultTimeout() + Expect(search.ExitCode()).To(Not(Equal(0))) + + search = podmanTest.Podman([]string{"search", "--list-tags", "docker.io/library/"}) + search.WaitWithDefaultTimeout() + Expect(len(search.OutputToStringArray()) == 0).To(BeTrue()) + }) }) diff --git a/test/e2e/toolbox_test.go b/test/e2e/toolbox_test.go new file mode 100644 index 000000000..6122cee19 --- /dev/null +++ b/test/e2e/toolbox_test.go @@ -0,0 +1,368 @@ +package integration + +/* + toolbox_test.go is under the care of the Toolbox Team. + + The tests are trying to stress parts of Podman that Toolbox[0] needs for + its functionality. + + [0] https://github.com/containers/toolbox + + Info about test cases: + - some tests rely on a certain configuration of a container that is done by + executing several commands in the entry-point of a container. To make + sure the initialization had enough time to be executed, + WaitContainerReady() after the container is started. + + - in several places there's an invocation of 'podman logs' It is there mainly + to ease debugging when a test goes wrong (during the initialization of a + container) but sometimes it is also used in the test case itself. + + Maintainers (Toolbox Team): + - Ondřej Míchal <harrymichal@fedoraproject.org> + - Debarshi Ray <rishi@fedoraproject.org> + + Also available on Freenode IRC on #silverblue or #podman +*/ + +import ( + "fmt" + "os" + "os/exec" + "os/user" + "strconv" + "strings" + "syscall" + + . "github.com/containers/podman/v2/test/utils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Toolbox-specific testing", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + podmanTest.SeedImages() + }) + + AfterEach(func() { + podmanTest.Cleanup() + f := CurrentGinkgoTestDescription() + processTestResult(f) + }) + + It("podman run --dns=none - allows self-management of /etc/resolv.conf", func() { + var session *PodmanSessionIntegration + + session = podmanTest.Podman([]string{"run", "--dns", "none", ALPINE, "sh", "-c", + "rm -f /etc/resolv.conf; touch -d '1970-01-01 00:02:03' /etc/resolv.conf; stat -c %s:%Y /etc/resolv.conf"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("0:123")) + }) + + It("podman run --no-hosts - allows self-management of /etc/hosts", func() { + var session *PodmanSessionIntegration + + session = podmanTest.Podman([]string{"run", "--no-hosts", ALPINE, "sh", "-c", + "rm -f /etc/hosts; touch -d '1970-01-01 00:02:03' /etc/hosts; stat -c %s:%Y /etc/hosts"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("0:123")) + }) + + It("podman create --ulimit host + podman exec - correctly mirrors hosts ulimits", func() { + if podmanTest.RemoteTest { + Skip("Ulimit check does not work with a remote client") + } + var session *PodmanSessionIntegration + var containerHardLimit int + var rlimit syscall.Rlimit + var err error + + err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) + Expect(err).To(BeNil()) + fmt.Printf("Expected value: %d", rlimit.Max) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--ulimit", "host", ALPINE, + "sleep", "1000"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"exec", "test", "sh", "-c", + "ulimit -H -n"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + containerHardLimit, err = strconv.Atoi(strings.Trim(session.OutputToString(), "\n")) + Expect(err).To(BeNil()) + Expect(containerHardLimit).To(BeNumerically(">=", rlimit.Max)) + }) + + It("podman create --ipc=host --pid=host + podman exec - correct shared memory limit size", func() { + // Comparison of the size of /dev/shm on the host being equal to the one in + // a container + if podmanTest.RemoteTest { + Skip("Shm size check does not work with a remote client") + } + var session *PodmanSessionIntegration + var cmd *exec.Cmd + var hostShmSize, containerShmSize int + var err error + + // Because Alpine uses busybox, most commands don't offer advanced options + // like "--output" in df. Therefore the value of the field 'Size' (or + // ('1K-blocks') needs to be extracted manually. 
+ cmd = exec.Command("df", "/dev/shm") + res, err := cmd.Output() + Expect(err).To(BeNil()) + lines := strings.SplitN(string(res), "\n", 2) + fields := strings.Fields(lines[len(lines)-1]) + hostShmSize, err = strconv.Atoi(fields[1]) + Expect(err).To(BeNil()) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--ipc=host", "--pid=host", ALPINE, + "sleep", "1000"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"exec", "test", + "df", "/dev/shm"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + lines = session.OutputToStringArray() + fields = strings.Fields(lines[len(lines)-1]) + containerShmSize, err = strconv.Atoi(fields[1]) + Expect(err).To(BeNil()) + + // In some cases it may happen that the size of /dev/shm is not exactly + // equal. Therefore it's fine if there's a slight tolerance between the + // compared values. + Expect(hostShmSize).To(BeNumerically("~", containerShmSize, 100)) + }) + + It("podman create --userns=keep-id --user root:root - entrypoint - entrypoint is executed as root", func() { + var session *PodmanSessionIntegration + + session = podmanTest.Podman([]string{"run", "--userns=keep-id", "--user", "root:root", ALPINE, + "id"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("uid=0(root) gid=0(root)")) + }) + + It("podman create --userns=keep-id + podman exec - correct names of user and group", func() { + var session *PodmanSessionIntegration + var err error + + currentUser, err := user.Current() + Expect(err).To(BeNil()) + + currentGroup, err := user.LookupGroupId(currentUser.Gid) + Expect(err).To(BeNil()) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", ALPINE, + "sleep", "1000"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(err).To(BeNil()) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + expectedOutput := fmt.Sprintf("uid=%s(%s) gid=%s(%s)", + currentUser.Uid, currentUser.Username, + currentGroup.Gid, currentGroup.Name) + + session = podmanTest.Podman([]string{"exec", "test", + "id"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring(expectedOutput)) + }) + + It("podman create --userns=keep-id - entrypoint - adding user with useradd and then removing their password", func() { + var session *PodmanSessionIntegration + + var username string = "testuser" + var homeDir string = "/home/testuser" + var shell string = "/bin/sh" + var uid string = "1001" + var gid string = "1001" + + useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s", + homeDir, shell, uid, username) + passwd := fmt.Sprintf("passwd --delete %s", username) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", + fmt.Sprintf("%s; %s; echo READY; sleep 1000", useradd, passwd)}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + Expect(WaitContainerReady(podmanTest, 
"test", "READY", 2, 1)).To(BeTrue()) + + expectedOutput := fmt.Sprintf("%s:x:%s:%s::%s:%s", + username, uid, gid, homeDir, shell) + + session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring(expectedOutput)) + + expectedOutput = "passwd: Note: deleting a password also unlocks the password." + + session = podmanTest.Podman([]string{"logs", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring(expectedOutput)) + }) + + It("podman create --userns=keep-id + podman exec - adding group with groupadd", func() { + var session *PodmanSessionIntegration + + var groupName string = "testgroup" + var gid string = "1001" + + groupadd := fmt.Sprintf("groupadd --gid %s %s", gid, groupName) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", + fmt.Sprintf("%s; echo READY; sleep 1000", groupadd)}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue()) + + session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/group"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring(groupName)) + + session = podmanTest.Podman([]string{"logs", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("READY")) + }) + + It("podman create --userns=keep-id - entrypoint - modifying existing user with usermod - add to new group, change home/shell/uid", func() { + var session *PodmanSessionIntegration + var badHomeDir string = "/home/badtestuser" + var badShell string = "/bin/sh" + var badUID string = "1001" + var username string = "testuser" + var homeDir string = "/home/testuser" + var shell string = "/bin/bash" + var uid string = "2000" + var groupName string = "testgroup" + var gid string = "2000" + + // The use of bad* in the name of variables does not imply the invocation + // of useradd should fail The user is supposed to be created successfuly + // but later his information (uid, home, shell,..) is changed via usermod. 
+ useradd := fmt.Sprintf("useradd --home-dir %s --shell %s --uid %s %s", + badHomeDir, badShell, badUID, username) + groupadd := fmt.Sprintf("groupadd --gid %s %s", + gid, groupName) + usermod := fmt.Sprintf("usermod --append --groups wheel --home %s --shell %s --uid %s --gid %s %s", + homeDir, shell, uid, gid, username) + + session = podmanTest.Podman([]string{"create", "--name", "test", "--userns=keep-id", "--user", "root:root", fedoraToolbox, "sh", "-c", + fmt.Sprintf("%s; %s; %s; echo READY; sleep 1000", useradd, groupadd, usermod)}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue()) + + expectedUser := fmt.Sprintf("%s:x:%s:%s::%s:%s", + username, uid, gid, homeDir, shell) + + session = podmanTest.Podman([]string{"exec", "test", "cat", "/etc/passwd"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring(expectedUser)) + + session = podmanTest.Podman([]string{"logs", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("READY")) + }) + + It("podman run --privileged --userns=keep-id --user root:root - entrypoint - (bind)mounting", func() { + var session *PodmanSessionIntegration + + session = podmanTest.Podman([]string{"run", "--privileged", "--userns=keep-id", "--user", "root:root", ALPINE, + "mount", "-t", "tmpfs", "tmpfs", "/tmp"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"run", "--privileged", "--userns=keep-id", "--user", "root:root", ALPINE, + "mount", "--rbind", "/tmp", "/var/tmp"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + }) + + It("podman create + start - with all needed switches for create - sleep as entry-point", func() { + var session *PodmanSessionIntegration + + // These should be most of the switches that Toolbox uses to create a "toolbox" container + // https://github.com/containers/toolbox/blob/master/src/cmd/create.go + session = podmanTest.Podman([]string{"create", + "--dns", "none", + "--hostname", "toolbox", + "--ipc", "host", + "--label", "com.github.containers.toolbox=true", + "--name", "test", + "--network", "host", + "--no-hosts", + "--pid", "host", + "--privileged", + "--security-opt", "label=disable", + "--ulimit", "host", + "--userns=keep-id", + "--user", "root:root", + fedoraToolbox, "sh", "-c", "echo READY; sleep 1000"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"start", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + Expect(WaitContainerReady(podmanTest, "test", "READY", 2, 1)).To(BeTrue()) + + session = podmanTest.Podman([]string{"logs", "test"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("READY")) + }) +}) diff --git a/test/system/010-images.bats b/test/system/010-images.bats index ac65e54d9..900a24368 100644 --- a/test/system/010-images.bats +++ b/test/system/010-images.bats @@ -159,4 +159,56 @@ Labels.created_at | 20[0-9-]\\\+T[0-9:]\\\+Z is "$output" "$images_baseline" "after podman rmi @sha, still the same" } +# Tests #7199 (Restore 
"table" --format from V1) +# +# Tag our image with different-length strings; confirm table alignment +@test "podman images - table format" { + # Craft two tags such that they will bracket $IMAGE on either side (above + # and below). This assumes that $IMAGE is quay.io or foo.com or simply + # not something insane that will sort before 'aaa' or after 'zzz'. + local aaa_name=a.b/c + local aaa_tag=d + local zzz_name=zzzzzzzzzz.yyyyyyyyy/xxxxxxxxx + local zzz_tag=$(random_string 15) + + # Helper function to check one line of tabular output; all this does is + # generate a line with the given repo/tag, formatted to the width of the + # widest image, which is the zzz one. Fields are separated by TWO spaces. + function _check_line() { + local lineno=$1 + local name=$2 + local tag=$3 + + is "${lines[$lineno]}" \ + "$(printf '%-*s %-*s %s' ${#zzz_name} ${name} ${#zzz_tag} ${tag} $iid)" \ + "podman images, $testname, line $lineno" + } + + function _run_format_test() { + local testname=$1 + local format=$2 + + run_podman images --sort repository --format "$format" + _check_line 0 ${aaa_name} ${aaa_tag} + _check_line 1 "${PODMAN_TEST_IMAGE_REGISTRY}/${PODMAN_TEST_IMAGE_USER}/${PODMAN_TEST_IMAGE_NAME}" "${PODMAN_TEST_IMAGE_TAG}" + _check_line 2 ${zzz_name} ${zzz_tag} + } + + # Begin the test: tag $IMAGE with both the given names + run_podman tag $IMAGE ${aaa_name}:${aaa_tag} + run_podman tag $IMAGE ${zzz_name}:${zzz_tag} + + # Get the image ID, used to verify output below (all images share same IID) + run_podman inspect --format '{{.ID}}' $IMAGE + iid=${output:0:12} + + # Run the test: this will output three column-aligned rows. Test them. + # Tab character (\t) should have the same effect as the 'table' directive + _run_format_test 'table' 'table {{.Repository}} {{.Tag}} {{.ID}}' + _run_format_test 'tabs' '{{.Repository}}\t{{.Tag}}\t{{.ID}}' + + # Clean up. + run_podman rmi ${aaa_name}:${aaa_tag} ${zzz_name}:${zzz_tag} +} + # vim: filetype=sh diff --git a/test/system/060-mount.bats b/test/system/060-mount.bats index f11aff773..ece87acf6 100644 --- a/test/system/060-mount.bats +++ b/test/system/060-mount.bats @@ -43,6 +43,11 @@ load helpers # Start with clean slate run_podman image umount -a + # Get full image ID, to verify umount + run_podman image inspect --format '{{.ID}}' $IMAGE + iid="$output" + + # Mount, and make sure the mount point exists run_podman image mount $IMAGE mount_path="$output" @@ -60,6 +65,14 @@ load helpers # Clean up run_podman image umount $IMAGE + is "$output" "$iid" "podman image umount: image ID of what was umounted" + + run_podman image umount $IMAGE + is "$output" "" "podman image umount: does not re-umount" + + run_podman 125 image umount no-such-container + is "$output" "Error: unable to find a name and tag match for no-such-container in repotags: no such image" \ + "error message from image umount no-such-container" run_podman image mount is "$output" "" "podman image mount, no args, after umount" diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats index c16e64c58..3770eac27 100644 --- a/test/system/130-kill.bats +++ b/test/system/130-kill.bats @@ -6,23 +6,9 @@ load helpers @test "podman kill - test signal handling in containers" { - # podman-remote and crun interact poorly in f31: crun seems to gobble up - # some signals. - # Workaround: run 'env --default-signal sh' instead of just 'sh' in - # the container. Since env on our regular alpine image doesn't support - # that flag, we need to pull fedora-minimal. 
See: - # https://github.com/containers/podman/issues/5004 - # FIXME: remove this kludge once we get rid of podman-remote - local _image=$IMAGE - local _sh_cmd="sh" - if is_remote; then - _image=quay.io/libpod/fedora-minimal:latest - _sh_cmd="env --default-signal sh" - fi - # Start a container that will handle all signals by emitting 'got: N' local -a signals=(1 2 3 4 5 6 8 10 12 13 14 15 16 20 21 22 23 24 25 26 64) - run_podman run -d $_image $_sh_cmd -c \ + run_podman run -d $IMAGE sh -c \ "for i in ${signals[*]}; do trap \"echo got: \$i\" \$i; done; echo READY; while ! test -e /stop; do sleep 0.05; done; @@ -81,10 +67,6 @@ load helpers run_podman wait $cid run_podman rm $cid wait $podman_log_pid - - if [[ $_image != $IMAGE ]]; then - run_podman rmi $_image - fi } @test "podman kill - rejects invalid args" { diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats index 497e29b3e..1e44fe06c 100644 --- a/test/system/410-selinux.bats +++ b/test/system/410-selinux.bats @@ -7,9 +7,7 @@ load helpers function check_label() { - if [ ! -e /usr/sbin/selinuxenabled ] || ! /usr/sbin/selinuxenabled; then - skip "selinux disabled or not available" - fi + skip_if_no_selinux local args="$1"; shift # command-line args for run @@ -52,15 +50,33 @@ function check_label() { check_label "--privileged --userns=host" "spc_t" } +@test "podman selinux: pid=host" { + # FIXME FIXME FIXME: Remove these lines once all VMs have >= 2.146.0 + # (this is ugly, but better than an unconditional skip) + skip_if_no_selinux + if is_rootless; then + if [ -x /usr/bin/rpm ]; then + cs_version=$(rpm -q --qf '%{version}' container-selinux) + else + # SELinux not enabled on Ubuntu, so we should never get here + die "WHOA! SELinux enabled, but no /usr/bin/rpm!" + fi + if [[ "$cs_version" < "2.146" ]]; then + skip "FIXME: #7939: requires container-selinux-2.146.0 (currently installed: $cs_version)" + fi + fi + # FIXME FIXME FIXME: delete up to here, leaving just check_label + + check_label "--pid=host" "spc_t" +} + @test "podman selinux: container with overridden range" { check_label "--security-opt label=level:s0:c1,c2" "container_t" "s0:c1,c2" } # pr #6752 @test "podman selinux: inspect multiple labels" { - if [ ! -e /usr/sbin/selinuxenabled ] || ! 
/usr/sbin/selinuxenabled; then - skip "selinux disabled or not available" - fi + skip_if_no_selinux run_podman run -d --name myc \ --security-opt seccomp=unconfined \ @@ -75,4 +91,84 @@ function check_label() { run_podman rm -f myc } +# Sharing context between two containers not in a pod +# These tests were piggybacked in with #7902, but are not actually related +@test "podman selinux: shared context in (some) namespaces" { + skip_if_no_selinux + + run_podman run -d --name myctr $IMAGE top + run_podman exec myctr cat -v /proc/self/attr/current + context_c1="$output" + + # --ipc container + run_podman run --name myctr2 --ipc container:myctr $IMAGE cat -v /proc/self/attr/current + is "$output" "$context_c1" "new container, run with ipc of existing one " + + # --pid container + run_podman run --rm --pid container:myctr $IMAGE cat -v /proc/self/attr/current + is "$output" "$context_c1" "new container, run with --pid of existing one " + + # net NS: do not share context + run_podman run --rm --net container:myctr $IMAGE cat -v /proc/self/attr/current + if [[ "$output" = "$context_c1" ]]; then + die "run --net : context ($output) is same as running container (it should not be)" + fi + + # The 'myctr2' above was not run with --rm, so it still exists, and + # we can't remove the original container until this one is gone. + run_podman stop -t 0 myctr + run_podman 125 rm myctr + is "$output" "Error: container .* has dependent containers" + + # We have to do this in two steps: even if ordered as 'myctr2 myctr', + # podman will try the removes in random order, which fails if it + # tries myctr first. + run_podman rm myctr2 + run_podman rm myctr +} + +# pr #7902 - containers in pods should all run under same context +@test "podman selinux: containers in pods share full context" { + skip_if_no_selinux + + # We don't need a fullblown pause container; avoid pulling the k8s one + run_podman pod create --name myselinuxpod \ + --infra-image $IMAGE \ + --infra-command /home/podman/pause + + # Get baseline + run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current + context_c1="$output" + + # Prior to #7902, the labels (':c123,c456') would be different + run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current + is "$output" "$context_c1" "SELinux context of 2nd container matches 1st" + + # What the heck. 
Try a third time just for extra confidence + run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current + is "$output" "$context_c1" "SELinux context of 3rd container matches 1st" + + run_podman pod rm myselinuxpod +} + +# more pr #7902 +@test "podman selinux: containers in --no-infra pods do not share context" { + skip_if_no_selinux + + # We don't need a fullblown pause container; avoid pulling the k8s one + run_podman pod create --name myselinuxpod --infra=false + + # Get baseline + run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current + context_c1="$output" + + # Even after #7902, labels (':c123,c456') should be different + run_podman run --rm --pod myselinuxpod $IMAGE cat -v /proc/self/attr/current + if [[ "$output" = "$context_c1" ]]; then + die "context ($output) is the same on two separate containers, it should have been different" + fi + + run_podman pod rm myselinuxpod +} + # vim: filetype=sh diff --git a/test/system/helpers.bash b/test/system/helpers.bash index 998db5283..c6c2c12df 100644 --- a/test/system/helpers.bash +++ b/test/system/helpers.bash @@ -286,6 +286,17 @@ function skip_if_remote() { fi } +######################## +# skip_if_no_selinux # +######################## +function skip_if_no_selinux() { + if [ ! -e /usr/sbin/selinuxenabled ]; then + skip "selinux not available" + elif ! /usr/sbin/selinuxenabled; then + skip "selinux disabled" + fi +} + ######### # die # Abort with helpful message ######### |