diff options
Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/container_api.go | 20 | ||||
-rw-r--r-- | libpod/container_internal.go | 6 | ||||
-rw-r--r-- | libpod/container_internal_linux.go | 431 | ||||
-rw-r--r-- | libpod/container_internal_linux_test.go | 34 | ||||
-rw-r--r-- | libpod/container_validate.go | 11 | ||||
-rw-r--r-- | libpod/define/errors.go | 4 | ||||
-rw-r--r-- | libpod/image/image.go | 193 | ||||
-rw-r--r-- | libpod/image/layer_tree.go | 4 | ||||
-rw-r--r-- | libpod/image/prune.go | 2 | ||||
-rw-r--r-- | libpod/image/pull.go | 113 | ||||
-rw-r--r-- | libpod/image/pull_test.go | 38 | ||||
-rw-r--r-- | libpod/kube.go | 10 | ||||
-rw-r--r-- | libpod/networking_linux.go | 24 | ||||
-rw-r--r-- | libpod/networking_unsupported.go | 4 | ||||
-rw-r--r-- | libpod/oci_attach_linux.go | 5 | ||||
-rw-r--r-- | libpod/oci_attach_unsupported.go | 2 | ||||
-rw-r--r-- | libpod/oci_conmon_linux.go | 4 | ||||
-rw-r--r-- | libpod/options.go | 12 | ||||
-rw-r--r-- | libpod/rootless_cni_linux.go | 320 | ||||
-rw-r--r-- | libpod/runtime_ctr.go | 37 | ||||
-rw-r--r-- | libpod/runtime_img.go | 9 |
21 files changed, 1123 insertions, 160 deletions
diff --git a/libpod/container_api.go b/libpod/container_api.go index 0d7bbacd0..aef37dd59 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -9,6 +9,7 @@ import ( "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" + "github.com/containers/podman/v2/pkg/signal" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -129,7 +130,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt // Attach to the container before starting it go func() { - if err := c.attach(streams, keys, resize, true, startedChan); err != nil { + if err := c.attach(streams, keys, resize, true, startedChan, nil); err != nil { attachChan <- err } close(attachChan) @@ -243,8 +244,23 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <- return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers") } + // HACK: This is really gross, but there isn't a better way without + // splitting attach into separate versions for StartAndAttach and normal + // attaching, and I really do not want to do that right now. + // Send a SIGWINCH after attach succeeds so that most programs will + // redraw the screen for the new attach session. + attachRdy := make(chan bool) + if c.config.Spec.Process != nil && c.config.Spec.Process.Terminal { + go func() { + <-attachRdy + if err := c.ociRuntime.KillContainer(c, uint(signal.SIGWINCH), false); err != nil { + logrus.Warnf("Unable to send SIGWINCH to container %s after attach: %v", c.ID(), err) + } + }() + } + c.newContainerEvent(events.Attach) - return c.attach(streams, keys, resize, false, nil) + return c.attach(streams, keys, resize, false, nil, attachRdy) } // HTTPAttach forwards an attach session over a hijacked HTTP session. 
diff --git a/libpod/container_internal.go b/libpod/container_internal.go index c41d81a2b..5a0a0edfa 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -380,6 +380,8 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) { } dest.GIDMap = append(dest.GIDMap, g) } + dest.HostUIDMapping = false + dest.HostGIDMapping = false } } @@ -957,8 +959,10 @@ func (c *Container) completeNetworkSetup() error { if err := c.syncContainer(); err != nil { return err } - if c.config.NetMode.IsSlirp4netns() { + if rootless.IsRootless() { return c.runtime.setupRootlessNetNS(c) + } else if c.config.NetMode.IsSlirp4netns() { + return c.runtime.setupSlirp4netns(c) } if err := c.runtime.setupNetNS(c); err != nil { return err diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index f789b0069..86a28c176 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -36,7 +36,7 @@ import ( "github.com/containers/podman/v2/utils" "github.com/containers/storage/pkg/archive" securejoin "github.com/cyphar/filepath-securejoin" - User "github.com/opencontainers/runc/libcontainer/user" + runcuser "github.com/opencontainers/runc/libcontainer/user" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" @@ -84,7 +84,11 @@ func (c *Container) prepare() error { // Set up network namespace if not already set up noNetNS := c.state.NetNS == nil if c.config.CreateNetNS && noNetNS && !c.config.PostConfigureNetNS { - netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + if rootless.IsRootless() && len(c.config.Networks) > 0 { + netNS, networkStatus, createNetNSErr = AllocRootlessCNI(context.Background(), c) + } else { + netNS, networkStatus, createNetNSErr = c.runtime.createNetNS(c) + } if createNetNSErr != nil { return } @@ -98,8 +102,12 @@ func (c *Container) prepare() error { } 
// handle rootless network namespace setup - if noNetNS && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS { - createNetNSErr = c.runtime.setupRootlessNetNS(c) + if noNetNS && !c.config.PostConfigureNetNS { + if rootless.IsRootless() { + createNetNSErr = c.runtime.setupRootlessNetNS(c) + } else if c.config.NetMode.IsSlirp4netns() { + createNetNSErr = c.runtime.setupSlirp4netns(c) + } } }() // Mount storage if not mounted @@ -1268,7 +1276,7 @@ func (c *Container) makeBindMounts() error { // SHM is always added when we mount the container c.state.BindMounts["/dev/shm"] = c.config.ShmDir - newPasswd, err := c.generatePasswd() + newPasswd, newGroup, err := c.generatePasswdAndGroup() if err != nil { return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID()) } @@ -1278,9 +1286,16 @@ func (c *Container) makeBindMounts() error { // If it already exists, delete so we can recreate delete(c.state.BindMounts, "/etc/passwd") } - logrus.Debugf("adding entry to /etc/passwd for non existent default user") c.state.BindMounts["/etc/passwd"] = newPasswd } + if newGroup != "" { + // Make /etc/group + if _, ok := c.state.BindMounts["/etc/group"]; ok { + // If it already exists, delete so we can recreate + delete(c.state.BindMounts, "/etc/group") + } + c.state.BindMounts["/etc/group"] = newGroup + } // Make /etc/hostname // This should never change, so no need to recreate if it exists @@ -1499,23 +1514,171 @@ func (c *Container) getHosts() string { return hosts } +// generateGroupEntry generates an entry or entries into /etc/group as +// required by container configuration. +// Generally speaking, we will make an entry under two circumstances: +// 1. The container is started as a specific user:group, and that group is both +// numeric, and does not already exist in /etc/group. +// 2. 
It is requested that Libpod add the group that launched Podman to +// /etc/group via AddCurrentUserPasswdEntry (though this does not trigger if +// the group in question already exists in /etc/passwd). +// Returns group entry (as a string that can be appended to /etc/group) and any +// error that occurred. +func (c *Container) generateGroupEntry() (string, error) { + groupString := "" + + // Things we *can't* handle: adding the user we added in + // generatePasswdEntry to any *existing* groups. + addedGID := 0 + if c.config.AddCurrentUserPasswdEntry { + entry, gid, err := c.generateCurrentUserGroupEntry() + if err != nil { + return "", err + } + groupString += entry + addedGID = gid + } + if c.config.User != "" { + entry, _, err := c.generateUserGroupEntry(addedGID) + if err != nil { + return "", err + } + groupString += entry + } + + return groupString, nil +} + +// Make an entry in /etc/group for the group of the user running podman iff we +// are rootless. +func (c *Container) generateCurrentUserGroupEntry() (string, int, error) { + gid := rootless.GetRootlessGID() + if gid == 0 { + return "", 0, nil + } + + g, err := user.LookupGroupId(strconv.Itoa(gid)) + if err != nil { + return "", 0, errors.Wrapf(err, "failed to get current group") + } + + // Lookup group name to see if it exists in the image. + _, err = lookup.GetGroup(c.state.Mountpoint, g.Name) + if err != runcuser.ErrNoGroupEntries { + return "", 0, err + } + + // Lookup GID to see if it exists in the image. + _, err = lookup.GetGroup(c.state.Mountpoint, g.Gid) + if err != runcuser.ErrNoGroupEntries { + return "", 0, err + } + + // We need to get the username of the rootless user so we can add it to + // the group. + username := "" + uid := rootless.GetRootlessUID() + if uid != 0 { + u, err := user.LookupId(strconv.Itoa(uid)) + if err != nil { + return "", 0, errors.Wrapf(err, "failed to get current user to make group entry") + } + username = u.Username + } + + // Make the entry. 
+ return fmt.Sprintf("%s:x:%s:%s\n", g.Name, g.Gid, username), gid, nil +} + +// Make an entry in /etc/group for the group the container was specified to run +// as. +func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) { + if c.config.User == "" { + return "", 0, nil + } + + splitUser := strings.SplitN(c.config.User, ":", 2) + group := splitUser[0] + if len(splitUser) > 1 { + group = splitUser[1] + } + + gid, err := strconv.ParseUint(group, 10, 32) + if err != nil { + return "", 0, nil + } + + if addedGID != 0 && addedGID == int(gid) { + return "", 0, nil + } + + // Check if the group already exists + _, err = lookup.GetGroup(c.state.Mountpoint, group) + if err != runcuser.ErrNoGroupEntries { + return "", 0, err + } + + return fmt.Sprintf("%d:x:%d:%s\n", gid, gid, splitUser[0]), int(gid), nil +} + +// generatePasswdEntry generates an entry or entries into /etc/passwd as +// required by container configuration. +// Generally speaking, we will make an entry under two circumstances: +// 1. The container is started as a specific user who is not in /etc/passwd. +// This only triggers if the user is given as a *numeric* ID. +// 2. It is requested that Libpod add the user that launched Podman to +// /etc/passwd via AddCurrentUserPasswdEntry (though this does not trigger if +// the user in question already exists in /etc/passwd) or the UID to be added +// is 0). +// Returns password entry (as a string that can be appended to /etc/passwd) and +// any error that occurred. 
+func (c *Container) generatePasswdEntry() (string, error) { + passwdString := "" + + addedUID := 0 + if c.config.AddCurrentUserPasswdEntry { + entry, uid, _, err := c.generateCurrentUserPasswdEntry() + if err != nil { + return "", err + } + passwdString += entry + addedUID = uid + } + if c.config.User != "" { + entry, _, _, err := c.generateUserPasswdEntry(addedUID) + if err != nil { + return "", err + } + passwdString += entry + } + + return passwdString, nil +} + // generateCurrentUserPasswdEntry generates an /etc/passwd entry for the user -// running the container engine -func (c *Container) generateCurrentUserPasswdEntry() (string, error) { +// running the container engine. +// Returns a passwd entry for the user, and the UID and GID of the added entry. +func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) { uid := rootless.GetRootlessUID() if uid == 0 { - return "", nil + return "", 0, 0, nil } - u, err := user.LookupId(strconv.Itoa(rootless.GetRootlessUID())) + u, err := user.LookupId(strconv.Itoa(uid)) if err != nil { - return "", errors.Wrapf(err, "failed to get current user") + return "", 0, 0, errors.Wrapf(err, "failed to get current user") } // Lookup the user to see if it exists in the container image. _, err = lookup.GetUser(c.state.Mountpoint, u.Username) - if err != User.ErrNoPasswdEntries { - return "", err + if err != runcuser.ErrNoPasswdEntries { + return "", 0, 0, err + } + + // Lookup the UID to see if it exists in the container image. 
+ _, err = lookup.GetUser(c.state.Mountpoint, u.Uid) + if err != runcuser.ErrNoPasswdEntries { + return "", 0, 0, err } // If the user's actual home directory exists, or was mounted in - use @@ -1525,18 +1688,22 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, error) { homeDir = u.HomeDir } - return fmt.Sprintf("%s:x:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Username, homeDir), nil + return fmt.Sprintf("%s:*:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Username, homeDir), uid, rootless.GetRootlessGID(), nil } // generateUserPasswdEntry generates an /etc/passwd entry for the container user // to run in the container. -func (c *Container) generateUserPasswdEntry() (string, error) { +// The UID and GID of the added entry will also be returned. +// Accepts one argument, that being any UID that has already been added to the +// passwd file by other functions; if it matches the UID we were given, we don't +// need to do anything. +func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, error) { var ( groupspec string gid int ) if c.config.User == "" { - return "", nil + return "", 0, 0, nil } splitSpec := strings.SplitN(c.config.User, ":", 2) userspec := splitSpec[0] @@ -1546,13 +1713,17 @@ func (c *Container) generateUserPasswdEntry() (string, error) { // If a non numeric User, then don't generate passwd uid, err := strconv.ParseUint(userspec, 10, 32) if err != nil { - return "", nil + return "", 0, 0, nil + } + + if addedUID != 0 && int(uid) == addedUID { + return "", 0, 0, nil } // Lookup the user to see if it exists in the container image _, err = lookup.GetUser(c.state.Mountpoint, userspec) - if err != User.ErrNoPasswdEntries { - return "", err + if err != runcuser.ErrNoPasswdEntries { + return "", 0, 0, err } if groupspec != "" { @@ -1562,96 +1733,180 @@ func (c *Container) generateUserPasswdEntry() (string, error) { } else { group, err := lookup.GetGroup(c.state.Mountpoint, groupspec) if err != nil { - return 
"", errors.Wrapf(err, "unable to get gid %s from group file", groupspec) + return "", 0, 0, errors.Wrapf(err, "unable to get gid %s from group file", groupspec) } gid = group.Gid } } - return fmt.Sprintf("%d:x:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), nil + return fmt.Sprintf("%d:*:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), int(uid), gid, nil } -// generatePasswd generates a container specific passwd file, -// iff g.config.User is a number -func (c *Container) generatePasswd() (string, error) { +// generatePasswdAndGroup generates container-specific passwd and group files +// iff g.config.User is a number or we are configured to make a passwd entry for +// the current user. +// Returns path to file to mount at /etc/passwd, path to file to mount at +// /etc/group, and any error that occurred. If no passwd/group file were +// required, the empty string will be returned for those path (this may occur +// even if no error happened). +// This may modify the mounted container's /etc/passwd and /etc/group instead of +// making copies to bind-mount in, so we don't break useradd (it wants to make a +// copy of /etc/passwd and rename the copy to /etc/passwd, which is impossible +// with a bind mount). This is done in cases where the container is *not* +// read-only. In this case, the function will return nothing ("", "", nil). +func (c *Container) generatePasswdAndGroup() (string, string, error) { if !c.config.AddCurrentUserPasswdEntry && c.config.User == "" { - return "", nil + return "", "", nil } + + needPasswd := true + needGroup := true + + // First, check if there's a mount at /etc/passwd or group, we don't + // want to interfere with user mounts. 
if MountExists(c.config.Spec.Mounts, "/etc/passwd") { - return "", nil + needPasswd = false } - // Re-use passwd if possible - passwdPath := filepath.Join(c.config.StaticDir, "passwd") - if _, err := os.Stat(passwdPath); err == nil { - return passwdPath, nil + if MountExists(c.config.Spec.Mounts, "/etc/group") { + needGroup = false } - // Check if container has a /etc/passwd - if it doesn't do nothing. - passwdPath, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") - if err != nil { - return "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID()) + + // Next, check if we already made the files. If we did, we don't need to + // do anything more. + if needPasswd { + passwdPath := filepath.Join(c.config.StaticDir, "passwd") + if _, err := os.Stat(passwdPath); err == nil { + needPasswd = false + } } - if _, err := os.Stat(passwdPath); err != nil { - if os.IsNotExist(err) { - return "", nil + if needGroup { + groupPath := filepath.Join(c.config.StaticDir, "group") + if _, err := os.Stat(groupPath); err == nil { + needGroup = false } - return "", errors.Wrapf(err, "unable to access container %s /etc/passwd", c.ID()) } - pwd := "" - if c.config.User != "" { - entry, err := c.generateUserPasswdEntry() + + // Next, check if the container even has a /etc/passwd or /etc/group. + // If it doesn't we don't want to create them ourselves. + if needPasswd { + exists, err := c.checkFileExistsInRootfs("/etc/passwd") if err != nil { - return "", err + return "", "", err } - pwd += entry + needPasswd = exists } - if c.config.AddCurrentUserPasswdEntry { - entry, err := c.generateCurrentUserPasswdEntry() + if needGroup { + exists, err := c.checkFileExistsInRootfs("/etc/group") if err != nil { - return "", err + return "", "", err } - pwd += entry + needGroup = exists } - if pwd == "" { - return "", nil + + // If we don't need a /etc/passwd or /etc/group at this point we can + // just return. 
+ if !needPasswd && !needGroup { + return "", "", nil } - // If we are *not* read-only - edit /etc/passwd in the container. - // This is *gross* (shows up in changes to the container, will be - // committed to images based on the container) but it actually allows us - // to add users to the container (a bind mount breaks useradd). - // We should never get here twice, because generateUserPasswdEntry will - // not return anything if the user already exists in /etc/passwd. - if !c.IsReadOnly() { - containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") + passwdPath := "" + groupPath := "" + + ro := c.IsReadOnly() + + if needPasswd { + passwdEntry, err := c.generatePasswdEntry() if err != nil { - return "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID()) + return "", "", err } - f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600) + needsWrite := passwdEntry != "" + switch { + case ro && needsWrite: + logrus.Debugf("Making /etc/passwd for container %s", c.ID()) + originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") + if err != nil { + return "", "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID()) + } + orig, err := ioutil.ReadFile(originPasswdFile) + if err != nil && !os.IsNotExist(err) { + return "", "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile) + } + passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry) + if err != nil { + return "", "", errors.Wrapf(err, "failed to create temporary passwd file") + } + if err := os.Chmod(passwdFile, 0644); err != nil { + return "", "", err + } + passwdPath = passwdFile + case !ro && needsWrite: + logrus.Debugf("Modifying container %s /etc/passwd", c.ID()) + containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd") + if err != nil { + return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", 
c.ID()) + } + + f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return "", "", errors.Wrapf(err, "error opening container %s /etc/passwd", c.ID()) + } + defer f.Close() + + if _, err := f.WriteString(passwdEntry); err != nil { + return "", "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID()) + } + default: + logrus.Debugf("Not modifying container %s /etc/passwd", c.ID()) + } + } + if needGroup { + groupEntry, err := c.generateGroupEntry() if err != nil { - return "", errors.Wrapf(err, "error opening container %s /etc/passwd", c.ID()) + return "", "", err } - defer f.Close() - if _, err := f.WriteString(pwd); err != nil { - return "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID()) - } + needsWrite := groupEntry != "" + switch { + case ro && needsWrite: + logrus.Debugf("Making /etc/group for container %s", c.ID()) + originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group") + if err != nil { + return "", "", errors.Wrapf(err, "error creating path to container %s /etc/group", c.ID()) + } + orig, err := ioutil.ReadFile(originGroupFile) + if err != nil && !os.IsNotExist(err) { + return "", "", errors.Wrapf(err, "unable to read group file %s", originGroupFile) + } + groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry) + if err != nil { + return "", "", errors.Wrapf(err, "failed to create temporary group file") + } + if err := os.Chmod(groupFile, 0644); err != nil { + return "", "", err + } + groupPath = groupFile + case !ro && needsWrite: + logrus.Debugf("Modifying container %s /etc/group", c.ID()) + containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group") + if err != nil { + return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/group", c.ID()) + } - return "", nil - } + f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return "", "", 
errors.Wrapf(err, "error opening container %s /etc/group", c.ID()) + } + defer f.Close() - originPasswdFile := filepath.Join(c.state.Mountpoint, "/etc/passwd") - orig, err := ioutil.ReadFile(originPasswdFile) - if err != nil && !os.IsNotExist(err) { - return "", errors.Wrapf(err, "unable to read passwd file %s", originPasswdFile) - } - passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+pwd) - if err != nil { - return "", errors.Wrapf(err, "failed to create temporary passwd file") - } - if err := os.Chmod(passwdFile, 0644); err != nil { - return "", err + if _, err := f.WriteString(groupEntry); err != nil { + return "", "", errors.Wrapf(err, "unable to append to container %s /etc/group", c.ID()) + } + default: + logrus.Debugf("Not modifying container %s /etc/group", c.ID()) + } } - return passwdFile, nil + + return passwdPath, groupPath, nil } func (c *Container) copyOwnerAndPerms(source, dest string) error { @@ -1743,3 +1998,23 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) { func (c *Container) cleanupOverlayMounts() error { return overlay.CleanupContent(c.config.StaticDir) } + +// Check if a file exists at the given path in the container's root filesystem. +// Container must already be mounted for this to be used. 
+func (c *Container) checkFileExistsInRootfs(file string) (bool, error) { + checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file) + if err != nil { + return false, errors.Wrapf(err, "cannot create path to container %s file %q", c.ID(), file) + } + stat, err := os.Stat(checkPath) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, errors.Wrapf(err, "error accessing container %s file %q", c.ID(), file) + } + if stat.IsDir() { + return false, nil + } + return true, nil +} diff --git a/libpod/container_internal_linux_test.go b/libpod/container_internal_linux_test.go index 41c22fb45..1465ffbea 100644 --- a/libpod/container_internal_linux_test.go +++ b/libpod/container_internal_linux_test.go @@ -29,16 +29,42 @@ func TestGenerateUserPasswdEntry(t *testing.T) { Mountpoint: "/does/not/exist/tmp/", }, } - user, err := c.generateUserPasswdEntry() + user, _, _, err := c.generateUserPasswdEntry(0) if err != nil { t.Fatal(err) } - assert.Equal(t, user, "123:x:123:456:container user:/:/bin/sh\n") + assert.Equal(t, user, "123:*:123:456:container user:/:/bin/sh\n") c.config.User = "567" - user, err = c.generateUserPasswdEntry() + user, _, _, err = c.generateUserPasswdEntry(0) if err != nil { t.Fatal(err) } - assert.Equal(t, user, "567:x:567:0:container user:/:/bin/sh\n") + assert.Equal(t, user, "567:*:567:0:container user:/:/bin/sh\n") +} + +func TestGenerateUserGroupEntry(t *testing.T) { + c := Container{ + config: &ContainerConfig{ + Spec: &spec.Spec{}, + ContainerSecurityConfig: ContainerSecurityConfig{ + User: "123:456", + }, + }, + state: &ContainerState{ + Mountpoint: "/does/not/exist/tmp/", + }, + } + group, _, err := c.generateUserGroupEntry(0) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, group, "456:x:456:123\n") + + c.config.User = "567" + group, _, err = c.generateUserGroupEntry(0) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, group, "567:x:567:567\n") } diff --git a/libpod/container_validate.go 
b/libpod/container_validate.go index d657e3549..b78168cd1 100644 --- a/libpod/container_validate.go +++ b/libpod/container_validate.go @@ -2,7 +2,6 @@ package libpod import ( "github.com/containers/podman/v2/libpod/define" - "github.com/containers/podman/v2/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -68,16 +67,6 @@ func (c *Container) validate() error { } } - // Rootless has some requirements, compared to networks. - if rootless.IsRootless() { - if len(c.config.Networks) > 0 { - return errors.Wrapf(define.ErrInvalidArg, "cannot join CNI networks if running rootless") - } - - // TODO: Should we make sure network mode is set to Slirp if set - // at all? - } - // Can only set static IP or MAC is creating a network namespace. if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) { return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace") diff --git a/libpod/define/errors.go b/libpod/define/errors.go index f80b1d6e3..7714ebbf0 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -161,4 +161,8 @@ var ( // ErrNetworkOnPodContainer indicates the user wishes to alter network attributes on a container // in a pod. This cannot be done as the infra container has all the network information ErrNetworkOnPodContainer = errors.New("network cannot be configured when it is shared with a pod") + + // ErrStoreNotInitialized indicates that the container storage was never + // initialized. 
+ ErrStoreNotInitialized = errors.New("the container storage was never initilized") ) diff --git a/libpod/image/image.go b/libpod/image/image.go index dee2ce0ee..850a48eae 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -17,6 +17,7 @@ import ( "github.com/containers/common/pkg/retry" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/directory" + "github.com/containers/image/v5/docker/archive" dockerarchive "github.com/containers/image/v5/docker/archive" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" @@ -173,13 +174,182 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile return newImage, nil } +// SaveImages stores one more images in a multi-image archive. +// Note that only `docker-archive` supports storing multiple +// image. +func (ir *Runtime) SaveImages(ctx context.Context, namesOrIDs []string, format string, outputFile string, quiet bool) (finalErr error) { + if format != DockerArchive { + return errors.Errorf("multi-image archives are only supported in in the %q format", DockerArchive) + } + + sys := GetSystemContext("", "", false) + + archWriter, err := archive.NewWriter(sys, outputFile) + if err != nil { + return err + } + defer func() { + err := archWriter.Close() + if err == nil { + return + } + if finalErr == nil { + finalErr = err + return + } + finalErr = errors.Wrap(finalErr, err.Error()) + }() + + // Decide whether c/image's progress bars should use stderr or stdout. + // Use stderr in case we need to be quiet or if the output is set to + // stdout. If the output is set of stdout, any log message there would + // corrupt the tarfile. + writer := os.Stdout + if quiet { + writer = os.Stderr + } + + // extend an image with additional tags + type imageData struct { + *Image + tags []reference.NamedTagged + } + + // Look up the images (and their tags) in the local storage. 
+ imageMap := make(map[string]*imageData) // to group tags for an image + imageQueue := []string{} // to preserve relative image order + for _, nameOrID := range namesOrIDs { + // Look up the name or ID in the local image storage. + localImage, err := ir.NewFromLocal(nameOrID) + if err != nil { + return err + } + id := localImage.ID() + + iData, exists := imageMap[id] + if !exists { + imageQueue = append(imageQueue, id) + iData = &imageData{Image: localImage} + imageMap[id] = iData + } + + // Unless we referred to an ID, add the input as a tag. + if !strings.HasPrefix(id, nameOrID) { + tag, err := NormalizedTag(nameOrID) + if err != nil { + return err + } + refTagged, isTagged := tag.(reference.NamedTagged) + if isTagged { + iData.tags = append(iData.tags, refTagged) + } + } + } + + policyContext, err := getPolicyContext(sys) + if err != nil { + return err + } + defer func() { + if err := policyContext.Destroy(); err != nil { + logrus.Errorf("failed to destroy policy context: %q", err) + } + }() + + // Now copy the images one-by-one. + for _, id := range imageQueue { + dest, err := archWriter.NewReference(nil) + if err != nil { + return err + } + + img := imageMap[id] + copyOptions := getCopyOptions(sys, writer, nil, nil, SigningOptions{}, "", img.tags) + copyOptions.DestinationCtx.SystemRegistriesConfPath = registries.SystemRegistriesConfPath() + + // For copying, we need a source reference that we can create + // from the image. + src, err := is.Transport.NewStoreReference(img.imageruntime.store, nil, id) + if err != nil { + return errors.Wrapf(err, "error getting source imageReference for %q", img.InputName) + } + _, err = cp.Image(ctx, policyContext, dest, src, copyOptions) + if err != nil { + return err + } + } + + return nil +} + +// LoadAllImagesFromDockerArchive loads all images from the docker archive that +// fileName points to. 
+func (ir *Runtime) LoadAllImagesFromDockerArchive(ctx context.Context, fileName string, signaturePolicyPath string, writer io.Writer) ([]*Image, error) { + if signaturePolicyPath == "" { + signaturePolicyPath = ir.SignaturePolicyPath + } + + sc := GetSystemContext(signaturePolicyPath, "", false) + reader, err := archive.NewReader(sc, fileName) + if err != nil { + return nil, err + } + + defer func() { + if err := reader.Close(); err != nil { + logrus.Errorf(err.Error()) + } + }() + + refLists, err := reader.List() + if err != nil { + return nil, err + } + + refPairs := []pullRefPair{} + for _, refList := range refLists { + for _, ref := range refList { + pairs, err := ir.getPullRefPairsFromDockerArchiveReference(ctx, reader, ref, sc) + if err != nil { + return nil, err + } + refPairs = append(refPairs, pairs...) + } + } + + goal := pullGoal{ + pullAllPairs: true, + usedSearchRegistries: false, + refPairs: refPairs, + searchedRegistries: nil, + } + + defer goal.cleanUp() + imageNames, err := ir.doPullImage(ctx, sc, goal, writer, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}, nil) + if err != nil { + return nil, err + } + + newImages := make([]*Image, 0, len(imageNames)) + for _, name := range imageNames { + newImage, err := ir.NewFromLocal(name) + if err != nil { + return nil, errors.Wrapf(err, "error retrieving local image after pulling %s", name) + } + newImages = append(newImages, newImage) + } + ir.newImageEvent(events.LoadFromArchive, "") + return newImages, nil +} + // LoadFromArchiveReference creates a new image object for images pulled from a tar archive and the like (podman load) // This function is needed because it is possible for a tar archive to have multiple tags for one image func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*Image, error) { if signaturePolicyPath == "" { signaturePolicyPath = ir.SignaturePolicyPath } - imageNames, err := 
ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{MaxRetry: maxRetry}) + + imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{}) if err != nil { return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef)) } @@ -466,6 +636,14 @@ func (ir *Runtime) getImage(image string) (*storage.Image, error) { return img, nil } +func (ir *Runtime) ImageNames(id string) ([]string, error) { + myImage, err := ir.getImage(id) + if err != nil { + return nil, errors.Wrapf(err, "error getting image %s ", id) + } + return myImage.Names, nil +} + // GetImages retrieves all images present in storage func (ir *Runtime) GetImages() ([]*Image, error) { return ir.getImages(false) @@ -1247,11 +1425,14 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool { // candidate parent's diff IDs, which together would have // controlled which layers were used - // issue #7444 describes a panic where the length of child.RootFS.DiffIDs - // is checked but child is nil. Adding a simple band-aid approach to prevent - // the problem until the origin of the problem can be worked out in the issue - // itself. - if child == nil || len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) { + // Both, child and parent, may be nil when the storage is left in an + // incoherent state. Issue #7444 describes such a case when a build + // has been killed. 
+ if child == nil || parent == nil { + return false + } + + if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) { return false } childUsesCandidateDiffs := true diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go index 3699655fd..18101575e 100644 --- a/libpod/image/layer_tree.go +++ b/libpod/image/layer_tree.go @@ -32,7 +32,9 @@ func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) { oci, exists := t.ociCache[i.ID()] if !exists { oci, err = i.ociv1Image(ctx) - t.ociCache[i.ID()] = oci + if err == nil { + t.ociCache[i.ID()] = oci + } } return oci, err } diff --git a/libpod/image/prune.go b/libpod/image/prune.go index 5a9ca5d8e..fcc65fb03 100644 --- a/libpod/image/prune.go +++ b/libpod/image/prune.go @@ -137,7 +137,7 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ( } if err := p.Remove(ctx, true); err != nil { if errors.Cause(err) == storage.ErrImageUsedByContainer { - logrus.Warnf("Failed to prune image %s as it is in use: %v", p.ID(), err) + logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage i.e. 
Buildah, CRI-O, etc., may be associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", p.ID(), err)
 			continue
 		}
 		return nil, errors.Wrap(err, "failed to prune image")
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index bdcda4016..94d6af4c2 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -11,8 +11,8 @@ import (
 	cp "github.com/containers/image/v5/copy"
 	"github.com/containers/image/v5/directory"
 	"github.com/containers/image/v5/docker"
+	"github.com/containers/image/v5/docker/archive"
 	dockerarchive "github.com/containers/image/v5/docker/archive"
-	"github.com/containers/image/v5/docker/tarfile"
 	ociarchive "github.com/containers/image/v5/oci/archive"
 	oci "github.com/containers/image/v5/oci/layout"
 	is "github.com/containers/image/v5/storage"
@@ -61,12 +61,26 @@ type pullRefPair struct {
 	dstRef types.ImageReference
 }
 
+// cleanUpFunc is a function prototype for clean-up functions.
+type cleanUpFunc func() error
+
 // pullGoal represents the prepared image references and decided behavior to be executed by imagePull
 type pullGoal struct {
 	refPairs             []pullRefPair
-	pullAllPairs         bool     // Pull all refPairs instead of stopping on first success.
-	usedSearchRegistries bool     // refPairs construction has depended on registries.GetRegistries()
-	searchedRegistries   []string // The list of search registries used; set only if usedSearchRegistries
+	pullAllPairs         bool          // Pull all refPairs instead of stopping on first success.
+	usedSearchRegistries bool          // refPairs construction has depended on registries.GetRegistries()
+	searchedRegistries   []string      // The list of search registries used; set only if usedSearchRegistries
+	cleanUpFuncs         []cleanUpFunc // Mainly used to close long-lived objects (e.g., an archive.Reader)
+}
+
+// cleanUp invokes all cleanUpFuncs. Certain resources may not be available
+// anymore. Errors are logged.
+func (p *pullGoal) cleanUp() { + for _, f := range p.cleanUpFuncs { + if err := f(); err != nil { + logrus.Error(err.Error()) + } + } } // singlePullRefPairGoal returns a no-frills pull goal for the specified reference pair. @@ -114,7 +128,49 @@ func (ir *Runtime) getSinglePullRefPairGoal(srcRef types.ImageReference, destNam return singlePullRefPairGoal(rp), nil } +// getPullRefPairsFromDockerArchiveReference returns a slice of pullRefPairs +// for the specified docker reference and the corresponding archive.Reader. +func (ir *Runtime) getPullRefPairsFromDockerArchiveReference(ctx context.Context, reader *archive.Reader, ref types.ImageReference, sc *types.SystemContext) ([]pullRefPair, error) { + destNames, err := reader.ManifestTagsForReference(ref) + if err != nil { + return nil, err + } + + if len(destNames) == 0 { + destName, err := getImageDigest(ctx, ref, sc) + if err != nil { + return nil, err + } + destNames = append(destNames, destName) + } else { + for i := range destNames { + ref, err := NormalizedTag(destNames[i]) + if err != nil { + return nil, err + } + destNames[i] = ref.String() + } + } + + refPairs := []pullRefPair{} + for _, destName := range destNames { + destRef, err := is.Transport.ParseStoreReference(ir.store, destName) + if err != nil { + return nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName) + } + pair := pullRefPair{ + image: destName, + srcRef: ref, + dstRef: destRef, + } + refPairs = append(refPairs, pair) + } + + return refPairs, nil +} + // pullGoalFromImageReference returns a pull goal for a single ImageReference, depending on the used transport. +// Note that callers are responsible for invoking (*pullGoal).cleanUp() to clean up possibly open resources. 
func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) (*pullGoal, error) { span, _ := opentracing.StartSpanFromContext(ctx, "pullGoalFromImageReference") defer span.Finish() @@ -122,57 +178,26 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. // supports pulling from docker-archive, oci, and registries switch srcRef.Transport().Name() { case DockerArchive: - archivePath := srcRef.StringWithinTransport() - tarSource, err := tarfile.NewSourceFromFile(archivePath) + reader, readerRef, err := archive.NewReaderForReference(sc, srcRef) if err != nil { return nil, err } - defer tarSource.Close() - manifest, err := tarSource.LoadTarManifest() + pairs, err := ir.getPullRefPairsFromDockerArchiveReference(ctx, reader, readerRef, sc) if err != nil { - return nil, errors.Wrapf(err, "error retrieving manifest.json") - } - // to pull the first image stored in the tar file - if len(manifest) == 0 { - // use the hex of the digest if no manifest is found - reference, err := getImageDigest(ctx, srcRef, sc) - if err != nil { - return nil, err - } - return ir.getSinglePullRefPairGoal(srcRef, reference) - } - - if len(manifest[0].RepoTags) == 0 { - // If the input image has no repotags, we need to feed it a dest anyways - digest, err := getImageDigest(ctx, srcRef, sc) - if err != nil { - return nil, err + // No need to defer for a single error path. 
+ if err := reader.Close(); err != nil { + logrus.Error(err.Error()) } - return ir.getSinglePullRefPairGoal(srcRef, digest) + return nil, err } - // Need to load in all the repo tags from the manifest - res := []pullRefPair{} - for _, dst := range manifest[0].RepoTags { - //check if image exists and gives a warning of untagging - localImage, err := ir.NewFromLocal(dst) - imageID := strings.TrimSuffix(manifest[0].Config, ".json") - if err == nil && imageID != localImage.ID() { - logrus.Errorf("the image %s already exists, renaming the old one with ID %s to empty string", dst, localImage.ID()) - } - - pullInfo, err := ir.getPullRefPair(srcRef, dst) - if err != nil { - return nil, err - } - res = append(res, pullInfo) - } return &pullGoal{ - refPairs: res, pullAllPairs: true, usedSearchRegistries: false, + refPairs: pairs, searchedRegistries: nil, + cleanUpFuncs: []cleanUpFunc{reader.Close}, }, nil case OCIArchive: @@ -249,6 +274,7 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s return nil, errors.Wrapf(err, "error determining pull goal for image %q", inputName) } } + defer goal.cleanUp() return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label) } @@ -267,6 +293,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag if err != nil { return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef)) } + defer goal.cleanUp() return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil) } diff --git a/libpod/image/pull_test.go b/libpod/image/pull_test.go index 0046cdfef..6cb80e8b5 100644 --- a/libpod/image/pull_test.go +++ b/libpod/image/pull_test.go @@ -150,7 +150,7 @@ func TestPullGoalFromImageReference(t *testing.T) { { // RepoTags is empty "docker-archive:testdata/docker-unnamed.tar.xz", []expected{{"@ec9293436c2e66da44edb9efb8d41f6b13baf62283ebe846468bc992d76d7951", 
"@ec9293436c2e66da44edb9efb8d41f6b13baf62283ebe846468bc992d76d7951"}}, - false, + true, }, { // RepoTags is a [docker.io/library/]name:latest, normalized to the short format. "docker-archive:testdata/docker-name-only.tar.xz", @@ -170,11 +170,37 @@ func TestPullGoalFromImageReference(t *testing.T) { }, true, }, - { // FIXME: Two images in a single archive - only the "first" one (whichever it is) is returned - // (and docker-archive: then refuses to read anything when the manifest has more than 1 item) + { // Reference image by name in multi-image archive + "docker-archive:testdata/docker-two-images.tar.xz:example.com/empty:latest", + []expected{ + {"example.com/empty:latest", "example.com/empty:latest"}, + }, + true, + }, + { // Reference image by name in multi-image archive + "docker-archive:testdata/docker-two-images.tar.xz:example.com/empty/but:different", + []expected{ + {"example.com/empty/but:different", "example.com/empty/but:different"}, + }, + true, + }, + { // Reference image by index in multi-image archive + "docker-archive:testdata/docker-two-images.tar.xz:@0", + []expected{ + {"example.com/empty:latest", "example.com/empty:latest"}, + }, + true, + }, + { // Reference image by index in multi-image archive + "docker-archive:testdata/docker-two-images.tar.xz:@1", + []expected{ + {"example.com/empty/but:different", "example.com/empty/but:different"}, + }, + true, + }, + { // Reference entire multi-image archive must fail (more than one manifest) "docker-archive:testdata/docker-two-images.tar.xz", - []expected{{"example.com/empty:latest", "example.com/empty:latest"}}, - // "example.com/empty/but:different" exists but is ignored + []expected{}, true, }, @@ -248,7 +274,7 @@ func TestPullGoalFromImageReference(t *testing.T) { for i, e := range c.expected { testDescription := fmt.Sprintf("%s #%d", c.srcName, i) assert.Equal(t, e.image, res.refPairs[i].image, testDescription) - assert.Equal(t, srcRef, res.refPairs[i].srcRef, testDescription) + assert.Equal(t, 
transports.ImageName(srcRef), transports.ImageName(res.refPairs[i].srcRef), testDescription) assert.Equal(t, e.dstName, storageReferenceWithoutLocation(res.refPairs[i].dstRef), testDescription) } assert.Equal(t, c.expectedPullAllPairs, res.pullAllPairs, c.srcName) diff --git a/libpod/kube.go b/libpod/kube.go index 5f2c9e0fd..9d5cbe68b 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -69,12 +69,20 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) { return nil, servicePorts, err } servicePorts = containerPortsToServicePorts(ports) + } pod, err := p.podWithContainers(allContainers, ports) if err != nil { return nil, servicePorts, err } pod.Spec.HostAliases = extraHost + + if p.SharesPID() { + // unfortunately, go doesn't have a nice way to specify a pointer to a bool + b := true + pod.Spec.ShareProcessNamespace = &b + } + return pod, servicePorts, nil } @@ -191,7 +199,7 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1. labels["app"] = removeUnderscores(podName) om := v12.ObjectMeta{ // The name of the pod is container_name-libpod - Name: removeUnderscores(podName), + Name: podName, Labels: labels, // CreationTimestamp seems to be required, so adding it; in doing so, the timestamp // will reflect time this is run (not container create time) because the conversion diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index 6f266e5d6..c0508ce39 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -4,6 +4,7 @@ package libpod import ( "bytes" + "context" "crypto/rand" "fmt" "io" @@ -208,6 +209,20 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) { // Configure the network namespace for a rootless container func (r *Runtime) setupRootlessNetNS(ctr *Container) error { + if ctr.config.NetMode.IsSlirp4netns() { + return r.setupSlirp4netns(ctr) + } + if len(ctr.config.Networks) > 0 { + // set up port forwarder for CNI-in-slirp4netns + netnsPath := ctr.state.NetNS.Path() + 
// TODO: support slirp4netns port forwarder as well + return r.setupRootlessPortMappingViaRLK(ctr, netnsPath) + } + return nil +} + +// setupSlirp4netns can be called in rootful as well as in rootless +func (r *Runtime) setupSlirp4netns(ctr *Container) error { path := r.config.Engine.NetworkCmdPath if path == "" { @@ -711,7 +726,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) - // rootless containers do not use the CNI plugin + // rootless containers do not use the CNI plugin directly if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { var requestedIP net.IP if ctr.requestedIP != nil { @@ -738,6 +753,13 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { } } + // CNI-in-slirp4netns + if rootless.IsRootless() && len(ctr.config.Networks) != 0 { + if err := DeallocRootlessCNI(context.Background(), ctr); err != nil { + return errors.Wrapf(err, "error tearing down CNI-in-slirp4netns for container %s", ctr.ID()) + } + } + // First unmount the namespace if err := netns.UnmountNS(ctr.state.NetNS); err != nil { return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID()) diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go index dd72a3fd8..76bb01424 100644 --- a/libpod/networking_unsupported.go +++ b/libpod/networking_unsupported.go @@ -8,6 +8,10 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error { return define.ErrNotImplemented } +func (r *Runtime) setupSlirp4netns(ctr *Container) error { + return define.ErrNotImplemented +} + func (r *Runtime) setupNetNS(ctr *Container) error { return define.ErrNotImplemented } diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index 622c613d9..74af449ed 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -31,7 +31,7 @@ const ( // Attach to the given container // Does not check if state is 
appropriate // started is only required if startContainer is true -func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error { +func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error { if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput { return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") } @@ -74,6 +74,9 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <- } receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys) + if attachRdy != nil { + attachRdy <- true + } return readStdio(streams, receiveStdoutError, stdinDone) } diff --git a/libpod/oci_attach_unsupported.go b/libpod/oci_attach_unsupported.go index cd7c674b2..317dfdc90 100644 --- a/libpod/oci_attach_unsupported.go +++ b/libpod/oci_attach_unsupported.go @@ -9,7 +9,7 @@ import ( "k8s.io/client-go/tools/remotecommand" ) -func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool) error { +func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error { return define.ErrNotImplemented } diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 23b020faa..5769e5580 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -1090,7 +1090,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe) cmd.ExtraFiles = append(cmd.ExtraFiles, envFiles...) 
- if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() { + if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { ports, err := bindPorts(ctr.config.PortMappings) if err != nil { return err @@ -1102,7 +1102,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, ports...) } - if ctr.config.NetMode.IsSlirp4netns() { + if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() { if ctr.config.PostConfigureNetNS { havePortMapping := len(ctr.Config().PortMappings) > 0 if havePortMapping { diff --git a/libpod/options.go b/libpod/options.go index 577c03810..61379710d 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -18,6 +18,7 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -908,6 +909,17 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { ctr.config.UserNsCtr = nsCtr.ID() ctr.config.IDMappings = nsCtr.config.IDMappings + g := generate.NewFromSpec(ctr.config.Spec) + + g.ClearLinuxUIDMappings() + for _, uidmap := range nsCtr.config.IDMappings.UIDMap { + g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) + } + g.ClearLinuxGIDMappings() + for _, gidmap := range nsCtr.config.IDMappings.GIDMap { + g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) + } + ctr.config.IDMappings = nsCtr.config.IDMappings return nil } } diff --git a/libpod/rootless_cni_linux.go b/libpod/rootless_cni_linux.go new file mode 100644 index 000000000..76dbfdcae --- /dev/null +++ b/libpod/rootless_cni_linux.go @@ -0,0 +1,320 @@ +// +build linux + +package libpod + +import ( + "bytes" + "context" + "io" + "path/filepath" + "runtime" + + cnitypes "github.com/containernetworking/cni/pkg/types/current" + 
"github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/podman/v2/libpod/define" + "github.com/containers/podman/v2/libpod/image" + "github.com/containers/podman/v2/pkg/util" + "github.com/containers/storage/pkg/lockfile" + "github.com/hashicorp/go-multierror" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var rootlessCNIInfraImage = map[string]string{ + // Built from ../contrib/rootless-cni-infra + // TODO: move to Podman's official quay + "amd64": "ghcr.io/akihirosuda/podman-rootless-cni-infra:gd34868a13-amd64", +} + +const ( + rootlessCNIInfraContainerNamespace = "podman-system" + rootlessCNIInfraContainerName = "rootless-cni-infra" +) + +// AllocRootlessCNI allocates a CNI netns inside the rootless CNI infra container. +// Locks "rootless-cni-infra.lck". +// +// When the infra container is not running, it is created. +// +// AllocRootlessCNI does not lock c. c should be already locked. 
+func AllocRootlessCNI(ctx context.Context, c *Container) (ns.NetNS, []*cnitypes.Result, error) { + if len(c.config.Networks) == 0 { + return nil, nil, errors.New("allocRootlessCNI shall not be called when len(c.config.Networks) == 0") + } + l, err := getRootlessCNIInfraLock(c.runtime) + if err != nil { + return nil, nil, err + } + l.Lock() + defer l.Unlock() + infra, err := ensureRootlessCNIInfraContainerRunning(ctx, c.runtime) + if err != nil { + return nil, nil, err + } + k8sPodName := getPodOrContainerName(c) // passed to CNI as K8S_POD_NAME + cniResults := make([]*cnitypes.Result, len(c.config.Networks)) + for i, nw := range c.config.Networks { + cniRes, err := rootlessCNIInfraCallAlloc(infra, c.ID(), nw, k8sPodName) + if err != nil { + return nil, nil, err + } + cniResults[i] = cniRes + } + nsObj, err := rootlessCNIInfraGetNS(infra, c.ID()) + if err != nil { + return nil, nil, err + } + logrus.Debugf("rootless CNI: container %q will join %q", c.ID(), nsObj.Path()) + return nsObj, cniResults, nil +} + +// DeallocRootlessCNI deallocates a CNI netns inside the rootless CNI infra container. +// Locks "rootless-cni-infra.lck". +// +// When the infra container is no longer needed, it is removed. +// +// DeallocRootlessCNI does not lock c. c should be already locked. 
+func DeallocRootlessCNI(ctx context.Context, c *Container) error { + if len(c.config.Networks) == 0 { + return errors.New("deallocRootlessCNI shall not be called when len(c.config.Networks) == 0") + } + l, err := getRootlessCNIInfraLock(c.runtime) + if err != nil { + return err + } + l.Lock() + defer l.Unlock() + infra, _ := getRootlessCNIInfraContainer(c.runtime) + if infra == nil { + return nil + } + var errs *multierror.Error + for _, nw := range c.config.Networks { + err := rootlessCNIInfraCallDelloc(infra, c.ID(), nw) + if err != nil { + errs = multierror.Append(errs, err) + } + } + if isIdle, err := rootlessCNIInfraIsIdle(infra); isIdle || err != nil { + if err != nil { + logrus.Warn(err) + } + logrus.Debugf("rootless CNI: removing infra container %q", infra.ID()) + if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil { + return err + } + logrus.Debugf("rootless CNI: removed infra container %q", infra.ID()) + } + return errs.ErrorOrNil() +} + +func getRootlessCNIInfraLock(r *Runtime) (lockfile.Locker, error) { + fname := filepath.Join(r.config.Engine.TmpDir, "rootless-cni-infra.lck") + return lockfile.GetLockfile(fname) +} + +func getPodOrContainerName(c *Container) string { + pod, err := c.runtime.GetPod(c.PodID()) + if err != nil || pod.config.Name == "" { + return c.Name() + } + return pod.config.Name +} + +func rootlessCNIInfraCallAlloc(infra *Container, id, nw, k8sPodName string) (*cnitypes.Result, error) { + logrus.Debugf("rootless CNI: alloc %q, %q, %q", id, nw, k8sPodName) + var err error + + _, err = rootlessCNIInfraExec(infra, "alloc", id, nw, k8sPodName) + if err != nil { + return nil, err + } + cniResStr, err := rootlessCNIInfraExec(infra, "print-cni-result", id, nw) + if err != nil { + return nil, err + } + var cniRes cnitypes.Result + if err := json.Unmarshal([]byte(cniResStr), &cniRes); err != nil { + return nil, errors.Wrapf(err, "unmarshaling as cnitypes.Result: %q", cniResStr) + } + return &cniRes, nil +} + +func 
rootlessCNIInfraCallDelloc(infra *Container, id, nw string) error { + logrus.Debugf("rootless CNI: dealloc %q, %q", id, nw) + _, err := rootlessCNIInfraExec(infra, "dealloc", id, nw) + return err +} + +func rootlessCNIInfraIsIdle(infra *Container) (bool, error) { + type isIdle struct { + Idle bool `json:"idle"` + } + resStr, err := rootlessCNIInfraExec(infra, "is-idle") + if err != nil { + return false, err + } + var res isIdle + if err := json.Unmarshal([]byte(resStr), &res); err != nil { + return false, errors.Wrapf(err, "unmarshaling as isIdle: %q", resStr) + } + return res.Idle, nil +} + +func rootlessCNIInfraGetNS(infra *Container, id string) (ns.NetNS, error) { + type printNetnsPath struct { + Path string `json:"path"` + } + resStr, err := rootlessCNIInfraExec(infra, "print-netns-path", id) + if err != nil { + return nil, err + } + var res printNetnsPath + if err := json.Unmarshal([]byte(resStr), &res); err != nil { + return nil, errors.Wrapf(err, "unmarshaling as printNetnsPath: %q", resStr) + } + nsObj, err := ns.GetNS(res.Path) + if err != nil { + return nil, err + } + return nsObj, nil +} + +func getRootlessCNIInfraContainer(r *Runtime) (*Container, error) { + containers, err := r.GetContainersWithoutLock(func(c *Container) bool { + return c.Namespace() == rootlessCNIInfraContainerNamespace && + c.Name() == rootlessCNIInfraContainerName + }) + if err != nil { + return nil, err + } + if len(containers) == 0 { + return nil, nil + } + return containers[0], nil +} + +func ensureRootlessCNIInfraContainerRunning(ctx context.Context, r *Runtime) (*Container, error) { + c, err := getRootlessCNIInfraContainer(r) + if err != nil { + return nil, err + } + if c == nil { + return startRootlessCNIInfraContainer(ctx, r) + } + st, err := c.ContainerState() + if err != nil { + return nil, err + } + if st.State == define.ContainerStateRunning { + logrus.Debugf("rootless CNI: infra container %q is already running", c.ID()) + return c, nil + } + logrus.Debugf("rootless CNI: 
infra container %q is %q, being started", c.ID(), st.State) + if err := c.initAndStart(ctx); err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: infra container %q is running", c.ID()) + return c, nil +} + +func startRootlessCNIInfraContainer(ctx context.Context, r *Runtime) (*Container, error) { + imageName, ok := rootlessCNIInfraImage[runtime.GOARCH] + if !ok { + return nil, errors.Errorf("cannot find rootless-podman-network-sandbox image for %s", runtime.GOARCH) + } + logrus.Debugf("rootless CNI: ensuring image %q to exist", imageName) + newImage, err := r.ImageRuntime().New(ctx, imageName, "", "", nil, nil, + image.SigningOptions{}, nil, util.PullImageMissing) + if err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: image %q is ready", imageName) + + g, err := generate.New("linux") + if err != nil { + return nil, err + } + g.SetupPrivileged(true) + // Set --pid=host for ease of propagating "/proc/PID/ns/net" string + if err := g.RemoveLinuxNamespace(string(spec.PIDNamespace)); err != nil { + return nil, err + } + g.RemoveMount("/proc") + procMount := spec.Mount{ + Destination: "/proc", + Type: "bind", + Source: "/proc", + Options: []string{"rbind", "nosuid", "noexec", "nodev"}, + } + g.AddMount(procMount) + // Mount CNI networks + etcCNINetD := spec.Mount{ + Destination: "/etc/cni/net.d", + Type: "bind", + Source: r.config.Network.NetworkConfigDir, + Options: []string{"ro"}, + } + g.AddMount(etcCNINetD) + // FIXME: how to propagate ProcessArgs and Envs from Dockerfile? 
+ g.SetProcessArgs([]string{"sleep", "infinity"}) + g.AddProcessEnv("CNI_PATH", "/opt/cni/bin") + var options []CtrCreateOption + options = append(options, WithRootFSFromImage(newImage.ID(), imageName, imageName)) + options = append(options, WithCtrNamespace(rootlessCNIInfraContainerNamespace)) + options = append(options, WithName(rootlessCNIInfraContainerName)) + options = append(options, WithPrivileged(true)) + options = append(options, WithSecLabels([]string{"disable"})) + options = append(options, WithRestartPolicy("always")) + options = append(options, WithNetNS(nil, false, "slirp4netns", nil)) + c, err := r.NewContainer(ctx, g.Config, options...) + if err != nil { + return nil, err + } + logrus.Debugf("rootless CNI infra container %q is created, now being started", c.ID()) + if err := c.initAndStart(ctx); err != nil { + return nil, err + } + logrus.Debugf("rootless CNI: infra container %q is running", c.ID()) + + return c, nil +} + +func rootlessCNIInfraExec(c *Container, args ...string) (string, error) { + cmd := "rootless-cni-infra" + var ( + outB bytes.Buffer + errB bytes.Buffer + streams define.AttachStreams + config ExecConfig + ) + streams.OutputStream = &nopWriteCloser{Writer: &outB} + streams.ErrorStream = &nopWriteCloser{Writer: &errB} + streams.AttachOutput = true + streams.AttachError = true + config.Command = append([]string{cmd}, args...) 
+ config.Privileged = true + logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, begin", + c.ID(), config, streams) + code, err := c.Exec(&config, &streams, nil) + logrus.Debugf("rootlessCNIInfraExec: c.ID()=%s, config=%+v, streams=%v, end (code=%d, err=%v)", + c.ID(), config, streams, code, err) + if err != nil { + return "", err + } + if code != 0 { + return "", errors.Errorf("command %s %v in container %s failed with status %d, stdout=%q, stderr=%q", + cmd, args, c.ID(), code, outB.String(), errB.String()) + } + return outB.String(), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (nwc *nopWriteCloser) Close() error { + return nil +} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index fa91fe002..241448981 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -8,11 +8,13 @@ import ( "strings" "time" + "github.com/containers/buildah" "github.com/containers/common/pkg/config" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/pkg/cgroups" "github.com/containers/podman/v2/pkg/rootless" + "github.com/containers/storage" "github.com/containers/storage/pkg/stringid" "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -770,7 +772,11 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) { func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) { r.lock.RLock() defer r.lock.RUnlock() + return r.GetContainersWithoutLock(filters...) 
+} +// GetContainersWithoutLock is same as GetContainers but without lock +func (r *Runtime) GetContainersWithoutLock(filters ...ContainerFilter) ([]*Container, error) { if !r.valid { return nil, define.ErrRuntimeStopped } @@ -905,3 +911,34 @@ func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int } return prunedContainers, pruneErrors, nil } + +// StorageContainers returns a list of containers from containers/storage that +// are not currently known to Podman. +func (r *Runtime) StorageContainers() ([]storage.Container, error) { + + if r.store == nil { + return nil, define.ErrStoreNotInitialized + } + + storeContainers, err := r.store.Containers() + if err != nil { + return nil, errors.Wrapf(err, "error reading list of all storage containers") + } + retCtrs := []storage.Container{} + for _, container := range storeContainers { + exists, err := r.state.HasContainer(container.ID) + if err != nil && err != define.ErrNoSuchCtr { + return nil, errors.Wrapf(err, "failed to check if %s container exists in database", container.ID) + } + if exists { + continue + } + retCtrs = append(retCtrs, container) + } + + return retCtrs, nil +} + +func (r *Runtime) IsBuildahContainer(id string) (bool, error) { + return buildah.IsContainer(id, r.store) +} diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index 2bc9feb65..eb4512f8d 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -282,9 +282,16 @@ func (r *Runtime) LoadImage(ctx context.Context, name, inputFile string, writer src types.ImageReference ) + if name == "" { + newImages, err = r.ImageRuntime().LoadAllImagesFromDockerArchive(ctx, inputFile, signaturePolicy, writer) + if err == nil { + return getImageNames(newImages), nil + } + } + for _, referenceFn := range []func() (types.ImageReference, error){ func() (types.ImageReference, error) { - return dockerarchive.ParseReference(inputFile) // FIXME? 
We should add dockerarchive.NewReference() + return dockerarchive.ParseReference(inputFile) }, func() (types.ImageReference, error) { return ociarchive.NewReference(inputFile, name) // name may be "" |