Diffstat (limited to 'pkg')
-rw-r--r--  pkg/adapter/containers.go | 5
-rw-r--r--  pkg/adapter/terminal_linux.go | 5
-rw-r--r--  pkg/adapter/terminal_unsupported.go | 2
-rw-r--r--  pkg/api/handlers/compat/containers_export.go | 42
-rw-r--r--  pkg/api/handlers/compat/images_push.go | 80
-rw-r--r--  pkg/api/handlers/libpod/containers.go | 131
-rw-r--r--  pkg/api/handlers/libpod/containers_create.go | 7
-rw-r--r--  pkg/api/handlers/libpod/images.go | 179
-rw-r--r--  pkg/api/handlers/swagger.go | 4
-rw-r--r--  pkg/api/handlers/types.go | 4
-rw-r--r--  pkg/api/handlers/utils/handler.go | 7
-rw-r--r--  pkg/api/handlers/utils/images.go | 41
-rw-r--r--  pkg/api/server/register_containers.go | 140
-rw-r--r--  pkg/api/server/register_images.go | 74
-rw-r--r--  pkg/bindings/containers/checkpoint.go | 79
-rw-r--r--  pkg/bindings/containers/containers.go | 20
-rw-r--r--  pkg/bindings/images/images.go | 53
-rw-r--r--  pkg/bindings/test/images_test.go | 4
-rw-r--r--  pkg/checkpoint/checkpoint_restore.go (renamed from pkg/adapter/checkpoint_restore.go) | 8
-rw-r--r--  pkg/domain/entities/containers.go | 69
-rw-r--r--  pkg/domain/entities/engine_container.go | 7
-rw-r--r--  pkg/domain/entities/engine_image.go | 4
-rw-r--r--  pkg/domain/entities/images.go | 69
-rw-r--r--  pkg/domain/infra/abi/containers.go | 202
-rw-r--r--  pkg/domain/infra/abi/images.go | 98
-rw-r--r--  pkg/domain/infra/abi/terminal/sigproxy_linux.go | 47
-rw-r--r--  pkg/domain/infra/abi/terminal/terminal.go | 103
-rw-r--r--  pkg/domain/infra/abi/terminal/terminal_linux.go | 123
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 105
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 84
-rw-r--r--  pkg/domain/infra/tunnel/system.go | 1
-rw-r--r--  pkg/spec/spec.go | 16
-rw-r--r--  pkg/spec/storage.go | 78
-rw-r--r--  pkg/specgen/config_linux.go | 93
-rw-r--r--  pkg/specgen/config_linux_cgo.go | 1
-rw-r--r--  pkg/specgen/container_validate.go | 4
-rw-r--r--  pkg/specgen/generate/container.go | 168
-rw-r--r--  pkg/specgen/generate/container_create.go (renamed from pkg/specgen/container_create.go) | 19
-rw-r--r--  pkg/specgen/namespaces.go | 7
-rw-r--r--  pkg/specgen/oci.go | 6
-rw-r--r--  pkg/specgen/security.go | 165
-rw-r--r--  pkg/specgen/specgen.go | 13
-rw-r--r--  pkg/specgen/storage.go | 885
-rw-r--r--  pkg/util/mountOpts.go | 24
-rw-r--r--  pkg/util/mountOpts_linux.go | 23
-rw-r--r--  pkg/util/mountOpts_other.go | 7
-rw-r--r--  pkg/varlinkapi/attach.go | 8
47 files changed, 3131 insertions(+), 183 deletions(-)
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index c395ffc7f..ecadbd2f9 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -26,6 +26,7 @@ import (
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/containers/libpod/pkg/checkpoint"
envLib "github.com/containers/libpod/pkg/env"
"github.com/containers/libpod/pkg/systemd/generate"
"github.com/containers/storage"
@@ -625,7 +626,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues)
switch {
case c.Import != "":
- containers, err = crImportCheckpoint(ctx, r.Runtime, c.Import, c.Name)
+ containers, err = checkpoint.CRImportCheckpoint(ctx, r.Runtime, c.Import, c.Name)
case c.All:
containers, err = r.GetContainers(filterFuncs...)
default:
@@ -1003,7 +1004,7 @@ func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecVal
}
env = envLib.Join(env, cliEnv)
- streams := new(libpod.AttachStreams)
+ streams := new(define.AttachStreams)
streams.OutputStream = os.Stdout
streams.ErrorStream = os.Stderr
if cli.Interactive {
diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go
index ef5a6f926..a56704be6 100644
--- a/pkg/adapter/terminal_linux.go
+++ b/pkg/adapter/terminal_linux.go
@@ -7,6 +7,7 @@ import (
"os"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
@@ -14,7 +15,7 @@ import (
)
// ExecAttachCtr execs and attaches to a container
-func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
resize := make(chan remotecommand.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
@@ -69,7 +70,7 @@ func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr,
defer cancel()
}
- streams := new(libpod.AttachStreams)
+ streams := new(define.AttachStreams)
streams.OutputStream = stdout
streams.ErrorStream = stderr
streams.InputStream = bufio.NewReader(stdin)
diff --git a/pkg/adapter/terminal_unsupported.go b/pkg/adapter/terminal_unsupported.go
index 3009f0a38..9067757a1 100644
--- a/pkg/adapter/terminal_unsupported.go
+++ b/pkg/adapter/terminal_unsupported.go
@@ -11,7 +11,7 @@ import (
)
// ExecAttachCtr execs and attaches to a container
-func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
return -1, define.ErrNotImplemented
}
diff --git a/pkg/api/handlers/compat/containers_export.go b/pkg/api/handlers/compat/containers_export.go
new file mode 100644
index 000000000..37b9fbf2b
--- /dev/null
+++ b/pkg/api/handlers/compat/containers_export.go
@@ -0,0 +1,42 @@
+package compat
+
+import (
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/pkg/errors"
+)
+
+func ExportContainer(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ name := utils.GetName(r)
+ con, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ tmpfile, err := ioutil.TempFile("", "api.tar")
+ if err != nil {
+ utils.Error(w, "unable to create tarball tempfile", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ return
+ }
+ defer os.Remove(tmpfile.Name())
+ if err := tmpfile.Close(); err != nil {
+ utils.Error(w, "unable to close tempfile", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ return
+ }
+ if err := con.Export(tmpfile.Name()); err != nil {
+ utils.Error(w, "failed to save the image", http.StatusInternalServerError, errors.Wrap(err, "failed to save image"))
+ return
+ }
+ rdr, err := os.Open(tmpfile.Name())
+ if err != nil {
+ utils.Error(w, "failed to read temp tarball", http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
+ return
+ }
+ defer rdr.Close()
+ utils.WriteResponse(w, http.StatusOK, rdr)
+}
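
For context, a minimal sketch of how a client could consume this new compat export endpoint over the API service's unix socket; the socket path, container name, and output file are illustrative assumptions, and the non-versioned route registered later in this patch is used:

package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Illustrative socket path; adjust to the running podman API service.
	const sock = "/run/podman/podman.sock"

	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", sock)
			},
		},
	}

	// The host portion of the URL is ignored; only the path is routed.
	resp, err := client.Get("http://d/containers/mycontainer/export")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("mycontainer.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}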
diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go
new file mode 100644
index 000000000..2260d5557
--- /dev/null
+++ b/pkg/api/handlers/compat/images_push.go
@@ -0,0 +1,80 @@
+package compat
+
+import (
+ "context"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/image"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/gorilla/schema"
+ "github.com/pkg/errors"
+)
+
+// PushImage is the handler for the compat http endpoint for pushing images.
+func PushImage(w http.ResponseWriter, r *http.Request) {
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+
+ query := struct {
+ Tag string `schema:"tag"`
+ }{
+ // This is where you can override the golang default value for one of the fields
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ // Note that Docker's docs state "Image name or ID" to be in the path
+ // parameter but it really must be a name as Docker does not allow for
+ // pushing an image by ID.
+ imageName := strings.TrimSuffix(utils.GetName(r), "/push") // GetName returns the entire path
+ if query.Tag != "" {
+ imageName += ":" + query.Tag
+ }
+ if _, err := utils.ParseStorageReference(imageName); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "image source %q is not a containers-storage-transport reference", imageName))
+ return
+ }
+
+ newImage, err := runtime.ImageRuntime().NewFromLocal(imageName)
+ if err != nil {
+ utils.ImageNotFound(w, imageName, errors.Wrapf(err, "Failed to find image %s", imageName))
+ return
+ }
+
+ // TODO: the X-Registry-Auth header is not checked yet here nor in any other
+ // endpoint. Pushing does NOT work with authentication at the moment.
+ dockerRegistryOptions := &image.DockerRegistryOptions{}
+ authfile := ""
+ if sys := runtime.SystemContext(); sys != nil {
+ dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
+ authfile = sys.AuthFilePath
+ }
+
+ err = newImage.PushImageToHeuristicDestination(
+ context.Background(),
+ imageName,
+ "", // manifest type
+ authfile,
+ "", // digest file
+ "", // signature policy
+ os.Stderr,
+ false, // force compression
+ image.SigningOptions{},
+ dockerRegistryOptions,
+ nil, // additional tags
+ )
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Error pushing image %q", imageName))
+ return
+ }
+
+ utils.WriteResponse(w, http.StatusOK, "")
+
+}
diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go
index cdc34004f..fde72552b 100644
--- a/pkg/api/handlers/libpod/containers.go
+++ b/pkg/api/handlers/libpod/containers.go
@@ -1,16 +1,21 @@
package libpod
import (
+ "io/ioutil"
"net/http"
+ "os"
"path/filepath"
"sort"
"strconv"
"time"
+ "github.com/containers/libpod/pkg/api/handlers/compat"
+
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/domain/entities"
"github.com/gorilla/schema"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -325,3 +330,129 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts shared.P
}
return ps, nil
}
+
+func Checkpoint(w http.ResponseWriter, r *http.Request) {
+ var targetFile string
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Keep bool `schema:"keep"`
+ LeaveRunning bool `schema:"leaveRunning"`
+ TCPEstablished bool `schema:"tcpEstablished"`
+ Export bool `schema:"export"`
+ IgnoreRootFS bool `schema:"ignoreRootFS"`
+ }{
+ // override any golang type defaults
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ name := utils.GetName(r)
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ ctr, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ if query.Export {
+ tmpFile, err := ioutil.TempFile("", "checkpoint")
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ defer os.Remove(tmpFile.Name())
+ if err := tmpFile.Close(); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ targetFile = tmpFile.Name()
+ }
+ options := libpod.ContainerCheckpointOptions{
+ Keep: query.Keep,
+ KeepRunning: query.LeaveRunning,
+ TCPEstablished: query.TCPEstablished,
+ IgnoreRootfs: query.IgnoreRootFS,
+ }
+ if query.Export {
+ options.TargetFile = targetFile
+ }
+ err = ctr.Checkpoint(r.Context(), options)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if query.Export {
+ f, err := os.Open(targetFile)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ defer f.Close()
+ utils.WriteResponse(w, http.StatusOK, f)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, entities.CheckpointReport{Id: ctr.ID()})
+}
+
+func Restore(w http.ResponseWriter, r *http.Request) {
+ var (
+ targetFile string
+ )
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Keep bool `schema:"keep"`
+ TCPEstablished bool `schema:"tcpEstablished"`
+ Import bool `schema:"import"`
+ Name string `schema:"name"`
+ IgnoreRootFS bool `schema:"ignoreRootFS"`
+ IgnoreStaticIP bool `schema:"ignoreStaticIP"`
+ IgnoreStaticMAC bool `schema:"ignoreStaticMAC"`
+ }{
+ // override any golang type defaults
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ name := utils.GetName(r)
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ ctr, err := runtime.LookupContainer(name)
+ if err != nil {
+ utils.ContainerNotFound(w, name, err)
+ return
+ }
+ if query.Import {
+ t, err := ioutil.TempFile("", "restore")
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ defer t.Close()
+ if err := compat.SaveFromBody(t, r); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ targetFile = t.Name()
+ }
+
+ options := libpod.ContainerCheckpointOptions{
+ Keep: query.Keep,
+ TCPEstablished: query.TCPEstablished,
+ IgnoreRootfs: query.IgnoreRootFS,
+ IgnoreStaticIP: query.IgnoreStaticIP,
+ IgnoreStaticMAC: query.IgnoreStaticMAC,
+ }
+ if query.Import {
+ options.TargetFile = targetFile
+ options.Name = query.Name
+ }
+ err = ctr.Restore(r.Context(), options)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, entities.RestoreReport{Id: ctr.ID()})
+}
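
Both handlers above rely on gorilla/schema to decode the boolean query parameters into an anonymous struct. A standalone sketch of that decoding step, using a named struct that mirrors the checkpoint query (the struct name is only illustrative):

package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

// checkpointQuery mirrors the anonymous query struct used by Checkpoint above.
type checkpointQuery struct {
	Keep           bool `schema:"keep"`
	LeaveRunning   bool `schema:"leaveRunning"`
	TCPEstablished bool `schema:"tcpEstablished"`
	Export         bool `schema:"export"`
	IgnoreRootFS   bool `schema:"ignoreRootFS"`
}

func main() {
	// What r.URL.Query() would contain for
	// POST /libpod/containers/{name}/checkpoint?keep=true&export=true
	vals := url.Values{}
	vals.Set("keep", "true")
	vals.Set("export", "true")

	var q checkpointQuery
	if err := schema.NewDecoder().Decode(&q, vals); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", q) // {Keep:true LeaveRunning:false TCPEstablished:false Export:true IgnoreRootFS:false}
}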
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index ebca41151..38a341a89 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/specgen"
+ "github.com/containers/libpod/pkg/specgen/generate"
"github.com/pkg/errors"
)
@@ -19,7 +20,11 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
return
}
- ctr, err := sg.MakeContainer(runtime)
+ if err := generate.CompleteSpec(r.Context(), runtime, &sg); err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ ctr, err := generate.MakeContainer(runtime, &sg)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index bc227d9a1..850de4598 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -14,15 +14,16 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
image2 "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/util"
+ utils2 "github.com/containers/libpod/utils"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
@@ -162,13 +163,16 @@ func PruneImages(w http.ResponseWriter, r *http.Request) {
}
func ExportImage(w http.ResponseWriter, r *http.Request) {
+ var (
+ output string
+ )
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
Compress bool `schema:"compress"`
Format string `schema:"format"`
}{
- Format: "docker-archive",
+ Format: define.OCIArchive,
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
@@ -176,14 +180,27 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
return
}
-
- tmpfile, err := ioutil.TempFile("", "api.tar")
- if err != nil {
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
- return
- }
- if err := tmpfile.Close(); err != nil {
- utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ switch query.Format {
+ case define.OCIArchive, define.V2s2Archive:
+ tmpfile, err := ioutil.TempFile("", "api.tar")
+ if err != nil {
+ utils.Error(w, "unable to create tmpfile", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
+ return
+ }
+ output = tmpfile.Name()
+ if err := tmpfile.Close(); err != nil {
+ utils.Error(w, "unable to close tmpfile", http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
+ return
+ }
+ case define.OCIManifestDir, define.V2s2ManifestDir:
+ tmpdir, err := ioutil.TempDir("", "save")
+ if err != nil {
+ utils.Error(w, "unable to create tmpdir", http.StatusInternalServerError, errors.Wrap(err, "unable to create tempdir"))
+ return
+ }
+ output = tmpdir
+ default:
+ utils.Error(w, "unknown format", http.StatusInternalServerError, errors.Errorf("unknown format %q", query.Format))
return
}
name := utils.GetName(r)
@@ -193,17 +210,28 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
return
}
- if err := newImage.Save(r.Context(), name, query.Format, tmpfile.Name(), []string{}, false, query.Compress); err != nil {
+ if err := newImage.Save(r.Context(), name, query.Format, output, []string{}, false, query.Compress); err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, err)
return
}
- rdr, err := os.Open(tmpfile.Name())
+ defer os.RemoveAll(output)
+ // if dir format, we need to tar it
+ if query.Format == "oci-dir" || query.Format == "docker-dir" {
+ rdr, err := utils2.Tar(output)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ defer rdr.Close()
+ utils.WriteResponse(w, http.StatusOK, rdr)
+ return
+ }
+ rdr, err := os.Open(output)
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
return
}
defer rdr.Close()
- defer os.Remove(tmpfile.Name())
utils.WriteResponse(w, http.StatusOK, rdr)
}
@@ -254,7 +282,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
return
}
}
- utils.WriteResponse(w, http.StatusOK, handlers.LibpodImagesLoadReport{ID: loadedImage})
+ utils.WriteResponse(w, http.StatusOK, entities.ImageLoadReport{Name: loadedImage})
}
func ImagesImport(w http.ResponseWriter, r *http.Request) {
@@ -300,7 +328,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
return
}
- utils.WriteResponse(w, http.StatusOK, handlers.LibpodImagesImportReport{ID: importedImage})
+ utils.WriteResponse(w, http.StatusOK, entities.ImageImportReport{Id: importedImage})
}
// ImagesPull is the v2 libpod endpoint for pulling images. Note that the
@@ -331,29 +359,16 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, errors.New("reference parameter cannot be empty"))
return
}
- // Enforce the docker transport. This is just a precaution as some callers
- // might be accustomed to using the "transport:reference" notation. Using
- // another than the "docker://" transport does not really make sense for a
- // remote case. For loading tarballs, the load and import endpoints should
- // be used.
- dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name())
- imageRef, err := alltransports.ParseImageName(query.Reference)
- if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
+
+ imageRef, err := utils.ParseDockerReference(query.Reference)
+ if err != nil {
utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
- errors.Errorf("reference %q must be a docker reference", query.Reference))
+ errors.Wrapf(err, "image destination %q is not a docker-transport reference", query.Reference))
return
- } else if err != nil {
- origErr := err
- imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, query.Reference))
- if err != nil {
- utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
- errors.Wrapf(origErr, "reference %q must be a docker reference", query.Reference))
- return
- }
}
// Trim the docker-transport prefix.
- rawImage := strings.TrimPrefix(query.Reference, dockerPrefix)
+ rawImage := strings.TrimPrefix(query.Reference, fmt.Sprintf("%s://", docker.Transport.Name()))
// all-tags doesn't work with a tagged reference, so let's check early
namedRef, err := reference.Parse(rawImage)
@@ -385,7 +400,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
OSChoice: query.OverrideOS,
ArchitectureChoice: query.OverrideArch,
}
- if query.TLSVerify {
+ if _, found := r.URL.Query()["tlsVerify"]; found {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
}
@@ -408,13 +423,19 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
}
}
+ authfile := ""
+ if sys := runtime.SystemContext(); sys != nil {
+ dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
+ authfile = sys.AuthFilePath
+ }
+
// Finally pull the images
for _, img := range imagesToPull {
newImage, err := runtime.ImageRuntime().New(
context.Background(),
img,
"",
- "",
+ authfile,
os.Stderr,
&dockerRegistryOptions,
image.SigningOptions{},
@@ -430,6 +451,94 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
utils.WriteResponse(w, http.StatusOK, res)
}
+// PushImage is the handler for the libpod http endpoint for pushing images.
+func PushImage(w http.ResponseWriter, r *http.Request) {
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+
+ query := struct {
+ Credentials string `schema:"credentials"`
+ Destination string `schema:"destination"`
+ TLSVerify bool `schema:"tlsVerify"`
+ }{
+ // This is where you can override the golang default value for one of the fields
+ }
+
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+
+ source := strings.TrimSuffix(utils.GetName(r), "/push") // GetName returns the entire path
+ if _, err := utils.ParseStorageReference(source); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "image source %q is not a containers-storage-transport reference", source))
+ return
+ }
+
+ destination := query.Destination
+ if destination == "" {
+ destination = source
+ }
+
+ if _, err := utils.ParseDockerReference(destination); err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "image destination %q is not a docker-transport reference", destination))
+ return
+ }
+
+ newImage, err := runtime.ImageRuntime().NewFromLocal(source)
+ if err != nil {
+ utils.ImageNotFound(w, source, errors.Wrapf(err, "Failed to find image %s", source))
+ return
+ }
+
+ var registryCreds *types.DockerAuthConfig
+ if len(query.Credentials) != 0 {
+ creds, err := util.ParseRegistryCreds(query.Credentials)
+ if err != nil {
+ utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+ errors.Wrapf(err, "error parsing credentials %q", query.Credentials))
+ return
+ }
+ registryCreds = creds
+ }
+
+ // TODO: the X-Registry-Auth header is not checked yet here nor in any other
+ // endpoint. Pushing does NOT work with authentication at the moment.
+ dockerRegistryOptions := &image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ }
+ authfile := ""
+ if sys := runtime.SystemContext(); sys != nil {
+ dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
+ authfile = sys.AuthFilePath
+ }
+ if _, found := r.URL.Query()["tlsVerify"]; found {
+ dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
+ }
+
+ err = newImage.PushImageToHeuristicDestination(
+ context.Background(),
+ destination,
+ "", // manifest type
+ authfile,
+ "", // digest file
+ "", // signature policy
+ os.Stderr,
+ false, // force compression
+ image.SigningOptions{},
+ dockerRegistryOptions,
+ nil, // additional tags
+ )
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Error pushing image %q", destination))
+ return
+ }
+
+ utils.WriteResponse(w, http.StatusOK, "")
+}
+
func CommitContainer(w http.ResponseWriter, r *http.Request) {
var (
destImage string
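
Both ImagesPull and PushImage above use the same idiom to distinguish an absent tlsVerify parameter from an explicit tlsVerify=false: DockerInsecureSkipTLSVerify is only set when the key is present in the raw query. A small sketch of that tri-state handling (the helper name is illustrative):

package main

import (
	"fmt"
	"net/url"

	"github.com/containers/image/v5/types"
)

// skipTLSVerify returns OptionalBoolUndefined when tlsVerify was not supplied,
// otherwise the inverse of the requested verification setting.
func skipTLSVerify(query url.Values) types.OptionalBool {
	if _, found := query["tlsVerify"]; !found {
		return types.OptionalBoolUndefined
	}
	// tlsVerify=false means "skip verification".
	return types.NewOptionalBool(query.Get("tlsVerify") == "false")
}

func main() {
	fmt.Println(skipTLSVerify(url.Values{}) == types.OptionalBoolUndefined)                  // true: parameter unset
	fmt.Println(skipTLSVerify(url.Values{"tlsVerify": {"false"}}) == types.OptionalBoolTrue) // true: skip verification
}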
diff --git a/pkg/api/handlers/swagger.go b/pkg/api/handlers/swagger.go
index 52763a050..33a9fdd58 100644
--- a/pkg/api/handlers/swagger.go
+++ b/pkg/api/handlers/swagger.go
@@ -31,14 +31,14 @@ type swagImageInspect struct {
// swagger:response DocsLibpodImagesLoadResponse
type swagLibpodImagesLoadResponse struct {
// in:body
- Body []LibpodImagesLoadReport
+ Body entities.ImageLoadReport
}
// Import response
// swagger:response DocsLibpodImagesImportResponse
type swagLibpodImagesImportResponse struct {
// in:body
- Body LibpodImagesImportReport
+ Body entities.ImageImportReport
}
// Pull response
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index 89a571e67..496512f2e 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -38,10 +38,6 @@ type LibpodImagesLoadReport struct {
ID string `json:"id"`
}
-type LibpodImagesImportReport struct {
- ID string `json:"id"`
-}
-
type LibpodImagesPullReport struct {
ID string `json:"id"`
}
diff --git a/pkg/api/handlers/utils/handler.go b/pkg/api/handlers/utils/handler.go
index 32b8c5b0a..b5bd488fb 100644
--- a/pkg/api/handlers/utils/handler.go
+++ b/pkg/api/handlers/utils/handler.go
@@ -46,6 +46,13 @@ func WriteResponse(w http.ResponseWriter, code int, value interface{}) {
if _, err := io.Copy(w, v); err != nil {
logrus.Errorf("unable to copy to response: %q", err)
}
+ case io.Reader:
+ w.Header().Set("Content-Type", "application/x-tar")
+ w.WriteHeader(code)
+
+ if _, err := io.Copy(w, v); err != nil {
+ logrus.Errorf("unable to copy to response: %q", err)
+ }
default:
WriteJSON(w, code, value)
}
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 696d5f745..1c67de9db 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -4,11 +4,52 @@ import (
"fmt"
"net/http"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/gorilla/schema"
+ "github.com/pkg/errors"
)
+// ParseDockerReference parses the specified image name to a
+// `types.ImageReference` and enforces it to refer to a docker-transport
+// reference.
+func ParseDockerReference(name string) (types.ImageReference, error) {
+ dockerPrefix := fmt.Sprintf("%s://", docker.Transport.Name())
+ imageRef, err := alltransports.ParseImageName(name)
+ if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
+ return nil, errors.Errorf("reference %q must be a docker reference", name)
+ } else if err != nil {
+ origErr := err
+ imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, name))
+ if err != nil {
+ return nil, errors.Wrapf(origErr, "reference %q must be a docker reference", name)
+ }
+ }
+ return imageRef, nil
+}
+
+// ParseStorageReference parses the specified image name to a
+// `types.ImageReference` and enforces it to refer to a
+// containers-storage-transport reference.
+func ParseStorageReference(name string) (types.ImageReference, error) {
+ storagePrefix := fmt.Sprintf("%s:", storage.Transport.Name())
+ imageRef, err := alltransports.ParseImageName(name)
+ if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
+ return nil, errors.Errorf("reference %q must be a storage reference", name)
+ } else if err != nil {
+ origErr := err
+ imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", storagePrefix, name))
+ if err != nil {
+ return nil, errors.Wrapf(origErr, "reference %q must be a storage reference", name)
+ }
+ }
+ return imageRef, nil
+}
+
// GetImages is a common function used to get images for libpod and other compatibility
// mechanisms
func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
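
For illustration, a short sketch of how ParseDockerReference behaves for a bare image name versus an explicit non-docker transport; the image names are arbitrary examples:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/api/handlers/utils"
)

func main() {
	// A bare name is normalized onto the docker:// transport.
	ref, err := utils.ParseDockerReference("quay.io/libpod/alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Transport().Name())      // docker
	fmt.Println(ref.StringWithinTransport()) // //quay.io/libpod/alpine:latest

	// A reference that parses to another transport is rejected.
	if _, err := utils.ParseDockerReference("oci-archive:/tmp/img.tar"); err != nil {
		fmt.Println("rejected:", err)
	}
}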
diff --git a/pkg/api/server/register_containers.go b/pkg/api/server/register_containers.go
index 08834ff01..f126112d0 100644
--- a/pkg/api/server/register_containers.go
+++ b/pkg/api/server/register_containers.go
@@ -587,6 +587,29 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
r.HandleFunc(VersionedPath("/containers/{name}/resize"), s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
// Added non version path to URI to support docker non versioned paths
r.HandleFunc("/containers/{name}/resize", s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
+ // swagger:operation GET /containers/{name}/export compat exportContainer
+ // ---
+ // tags:
+ // - containers (compat)
+ // summary: Export a container
+ // description: Export the contents of a container as a tarball.
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name or ID of the container
+ // produces:
+ // - application/x-tar
+ // responses:
+ // 200:
+ // description: tarball is returned in body
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/containers/{name}/export"), s.APIHandler(compat.ExportContainer)).Methods(http.MethodGet)
+ r.HandleFunc("/containers/{name}/export", s.APIHandler(compat.ExportContainer)).Methods(http.MethodGet)
/*
libpod endpoints
@@ -1237,5 +1260,122 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
// 500:
// $ref: "#/responses/InternalError"
r.HandleFunc(VersionedPath("/libpod/containers/{name}/resize"), s.APIHandler(compat.ResizeContainer)).Methods(http.MethodPost)
+ // swagger:operation GET /libpod/containers/{name}/export libpod libpodExportContainer
+ // ---
+ // tags:
+ // - containers
+ // summary: Export a container
+ // description: Export the contents of a container as a tarball.
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name or ID of the container
+ // produces:
+ // - application/x-tar
+ // responses:
+ // 200:
+ // description: tarball is returned in body
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/export"), s.APIHandler(compat.ExportContainer)).Methods(http.MethodGet)
+ // swagger:operation POST /libpod/containers/{name}/checkpoint libpod libpodCheckpointContainer
+ // ---
+ // tags:
+ // - containers
+ // summary: Checkpoint a container
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name or ID of the container
+ // - in: query
+ // name: keep
+ // type: boolean
+ // description: keep all temporary checkpoint files
+ // - in: query
+ // name: leaveRunning
+ // type: boolean
+ // description: leave the container running after writing checkpoint to disk
+ // - in: query
+ // name: tcpEstablished
+ // type: boolean
+ // description: checkpoint a container with established TCP connections
+ // - in: query
+ // name: export
+ // type: boolean
+ // description: export the checkpoint image to a tar.gz
+ // - in: query
+ // name: ignoreRootFS
+ // type: boolean
+ // description: do not include root file-system changes when exporting
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: tarball is returned in body if exported
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/checkpoint"), s.APIHandler(libpod.Checkpoint)).Methods(http.MethodPost)
+ // swagger:operation POST /libpod/containers/{name}/restore libpod libpodRestoreContainer
+ // ---
+ // tags:
+ // - containers
+ // summary: Restore a container
+ // description: Restore a container from a checkpoint.
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name or id of the container
+ // - in: query
+ // name: name
+ // type: string
+ // description: the name of the container when restored from a tar. Can only be used with import.
+ // - in: query
+ // name: keep
+ // type: boolean
+ // description: keep all temporary checkpoint files
+ // - in: query
+ // name: leaveRunning
+ // type: boolean
+ // description: leave the container running after writing checkpoint to disk
+ // - in: query
+ // name: tcpEstablished
+ // type: boolean
+ // description: restore a container with established TCP connections
+ // - in: query
+ // name: import
+ // type: boolean
+ // description: import the restore from a checkpoint tar.gz
+ // - in: query
+ // name: ignoreRootFS
+ // type: boolean
+ // description: do not apply root file-system changes when restoring from an exported checkpoint
+ // - in: query
+ // name: ignoreStaticIP
+ // type: boolean
+ // description: ignore IP address if set statically
+ // - in: query
+ // name: ignoreStaticMAC
+ // type: boolean
+ // description: ignore MAC address if set statically
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: the restored container's ID
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/restore"), s.APIHandler(libpod.Restore)).Methods(http.MethodPost)
return nil
}
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 74b245a77..d45423096 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -211,6 +211,41 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
r.Handle(VersionedPath("/images/{name:.*}"), s.APIHandler(compat.RemoveImage)).Methods(http.MethodDelete)
// Added non version path to URI to support docker non versioned paths
r.Handle("/images/{name:.*}", s.APIHandler(compat.RemoveImage)).Methods(http.MethodDelete)
+ // swagger:operation POST /images/{name:.*}/push compat pushImage
+ // ---
+ // tags:
+ // - images (compat)
+ // summary: Push Image
+ // description: Push an image to a container registry
+ // parameters:
+ // - in: path
+ // name: name:.*
+ // type: string
+ // required: true
+ // description: Name of image to push.
+ // - in: query
+ // name: tag
+ // type: string
+ // description: The tag to associate with the image on the registry.
+ // - in: header
+ // name: X-Registry-Auth
+ // type: string
+ // description: A base64-encoded auth configuration.
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 404:
+ // $ref: '#/responses/NoSuchImage'
+ // 500:
+ // $ref: '#/responses/InternalError'
+ r.Handle(VersionedPath("/images/{name:.*}/push"), s.APIHandler(compat.PushImage)).Methods(http.MethodPost)
+ // Added non version path to URI to support docker non versioned paths
+ r.Handle("/images/{name:.*}/push", s.APIHandler(compat.PushImage)).Methods(http.MethodPost)
// swagger:operation GET /images/{name:.*}/get compat exportImage
// ---
// tags:
@@ -583,6 +618,43 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
libpod endpoints
*/
+ // swagger:operation POST /libpod/images/{name:.*}/push libpod libpodPushImage
+ // ---
+ // tags:
+ // - images (libpod)
+ // summary: Push Image
+ // description: Push an image to a container registry
+ // parameters:
+ // - in: path
+ // name: name:.*
+ // type: string
+ // required: true
+ // description: Name of image to push.
+ // - in: query
+ // name: tag
+ // type: string
+ // description: The tag to associate with the image on the registry.
+ // - in: query
+ // name: credentials
+ // description: username:password for the registry.
+ // type: string
+ // - in: header
+ // name: X-Registry-Auth
+ // type: string
+ // description: A base64-encoded auth configuration.
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 404:
+ // $ref: '#/responses/NoSuchImage'
+ // 500:
+ // $ref: '#/responses/InternalError'
+ r.Handle(VersionedPath("/libpod/images/{name:.*}/push"), s.APIHandler(libpod.PushImage)).Methods(http.MethodPost)
// swagger:operation GET /libpod/images/{name:.*}/exists libpod libpodImageExists
// ---
// tags:
@@ -883,7 +955,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// tags:
// - images
// summary: Export an image
- // description: Export an image as a tarball
+ // description: Export an image
// parameters:
// - in: path
// name: name:.*
diff --git a/pkg/bindings/containers/checkpoint.go b/pkg/bindings/containers/checkpoint.go
new file mode 100644
index 000000000..84924587b
--- /dev/null
+++ b/pkg/bindings/containers/checkpoint.go
@@ -0,0 +1,79 @@
+package containers
+
+import (
+ "context"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/containers/libpod/pkg/bindings"
+ "github.com/containers/libpod/pkg/domain/entities"
+)
+
+// Checkpoint checkpoints the given container (identified by nameOrId). All additional
+// options are optional and allow for more fine-grained control of the checkpoint process.
+func Checkpoint(ctx context.Context, nameOrId string, keep, leaveRunning, tcpEstablished, ignoreRootFS *bool, export *string) (*entities.CheckpointReport, error) {
+ var report entities.CheckpointReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ if keep != nil {
+ params.Set("keep", strconv.FormatBool(*keep))
+ }
+ if leaveRunning != nil {
+ params.Set("leaveRunning", strconv.FormatBool(*leaveRunning))
+ }
+ if tcpEstablished != nil {
+ params.Set("TCPestablished", strconv.FormatBool(*tcpEstablished))
+ }
+ if ignoreRootFS != nil {
+ params.Set("ignoreRootFS", strconv.FormatBool(*ignoreRootFS))
+ }
+ if export != nil {
+ params.Set("export", *export)
+ }
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/checkpoint", params, nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
+}
+
+// Restore restores a checkpointed container to the running state. The container is identified by nameOrId. All
+// additional options are optional and allow for finer control of the restore process.
+func Restore(ctx context.Context, nameOrId string, keep, tcpEstablished, ignoreRootFS, ignoreStaticIP, ignoreStaticMAC *bool, name, importArchive *string) (*entities.RestoreReport, error) {
+ var report entities.RestoreReport
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ if keep != nil {
+ params.Set("keep", strconv.FormatBool(*keep))
+ }
+ if tcpEstablished != nil {
+ params.Set("TCPestablished", strconv.FormatBool(*tcpEstablished))
+ }
+ if ignoreRootFS != nil {
+ params.Set("ignoreRootFS", strconv.FormatBool(*ignoreRootFS))
+ }
+ if ignoreStaticIP != nil {
+ params.Set("ignoreStaticIP", strconv.FormatBool(*ignoreStaticIP))
+ }
+ if ignoreStaticMAC != nil {
+ params.Set("ignoreStaticMAC", strconv.FormatBool(*ignoreStaticMAC))
+ }
+ if name != nil {
+ params.Set("name", *name)
+ }
+ if importArchive != nil {
+ params.Set("import", *importArchive)
+ }
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restore", params, nameOrId)
+ if err != nil {
+ return nil, err
+ }
+ return &report, response.Process(&report)
+}
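
A minimal sketch of driving these bindings from a client; connCtx is assumed to be a connection context previously set up through the bindings package, and the package and container names are illustrative:

package client

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/bindings/containers"
)

// checkpointAndRestore checkpoints a running container while keeping its
// established TCP connections, then restores it again.
func checkpointAndRestore(connCtx context.Context, ctrName string) error {
	tcp := true
	cpReport, err := containers.Checkpoint(connCtx, ctrName, nil, nil, &tcp, nil, nil)
	if err != nil {
		return err
	}
	fmt.Println("checkpointed:", cpReport.Id)

	restReport, err := containers.Restore(connCtx, ctrName, nil, &tcp, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	fmt.Println("restored:", restReport.Id)
	return nil
}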
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index bad1294f4..49a2dfd58 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -2,6 +2,7 @@ package containers
import (
"context"
+ "io"
"net/http"
"net/url"
"strconv"
@@ -296,3 +297,22 @@ func Stop(ctx context.Context, nameOrID string, timeout *uint) error {
}
return response.Process(nil)
}
+
+// Export creates a tarball of the container identified by the given name or ID. It
+// requires an io.Writer to which the tarball is written.
+func Export(ctx context.Context, nameOrID string, w io.Writer) error {
+ params := url.Values{}
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/export", params, nameOrID)
+ if err != nil {
+ return err
+ }
+ if response.StatusCode/100 == 2 {
+ _, err = io.Copy(w, response.Body)
+ return err
+ }
+ return response.Process(nil)
+}
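
A matching sketch for the new Export binding, streaming the exported container into a local tar file; as above, connCtx is assumed to carry an existing bindings connection and the paths are illustrative:

package client

import (
	"context"
	"os"

	"github.com/containers/libpod/pkg/bindings/containers"
)

// exportToFile writes the container's exported filesystem tarball to tarPath.
func exportToFile(connCtx context.Context, nameOrID, tarPath string) error {
	f, err := os.Create(tarPath)
	if err != nil {
		return err
	}
	defer f.Close()
	// Export accepts any io.Writer; an *os.File works directly.
	return containers.Export(connCtx, nameOrID, f)
}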
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index ddc67bebc..1b3df609b 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -3,6 +3,7 @@ package images
import (
"context"
"errors"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -91,11 +92,11 @@ func History(ctx context.Context, nameOrID string) ([]*handlers.HistoryResponse,
return history, response.Process(&history)
}
-func Load(ctx context.Context, r io.Reader, name *string) (string, error) {
- var id handlers.IDResponse
+func Load(ctx context.Context, r io.Reader, name *string) (*entities.ImageLoadReport, error) {
+ var report entities.ImageLoadReport
conn, err := bindings.GetClient(ctx)
if err != nil {
- return "", err
+ return nil, err
}
params := url.Values{}
if name != nil {
@@ -103,9 +104,9 @@ func Load(ctx context.Context, r io.Reader, name *string) (string, error) {
}
response, err := conn.DoRequest(r, http.MethodPost, "/images/load", params)
if err != nil {
- return "", err
+ return nil, err
}
- return id.ID, response.Process(&id)
+ return &report, response.Process(&report)
}
// Remove deletes an image from local storage. The optional force parameter will forcibly remove
@@ -145,11 +146,12 @@ func Export(ctx context.Context, nameOrID string, w io.Writer, format *string, c
if err != nil {
return err
}
- if err := response.Process(nil); err != nil {
+
+ if response.StatusCode/100 == 2 || response.StatusCode/100 == 3 {
+ _, err = io.Copy(w, response.Body)
return err
}
- _, err = io.Copy(w, response.Body)
- return err
+ return response.Process(nil)
}
// Prune removes unused images from local storage. The optional filters can be used to further
@@ -217,14 +219,14 @@ func Build(nameOrId string) {}
// Imports adds the given image to the local image store. This can be done by file and the given reader
// or via the url parameter. Additional metadata can be associated with the image by using the changes and
// message parameters. The image can also be tagged given a reference. One of url OR r must be provided.
-func Import(ctx context.Context, changes []string, message, reference, u *string, r io.Reader) (string, error) {
- var id handlers.IDResponse
+func Import(ctx context.Context, changes []string, message, reference, u *string, r io.Reader) (*entities.ImageImportReport, error) {
+ var report entities.ImageImportReport
if r != nil && u != nil {
- return "", errors.New("url and r parameters cannot be used together")
+ return nil, errors.New("url and r parameters cannot be used together")
}
conn, err := bindings.GetClient(ctx)
if err != nil {
- return "", err
+ return nil, err
}
params := url.Values{}
for _, change := range changes {
@@ -241,9 +243,9 @@ func Import(ctx context.Context, changes []string, message, reference, u *string
}
response, err := conn.DoRequest(r, http.MethodPost, "/images/import", params)
if err != nil {
- return "", err
+ return nil, err
}
- return id.ID, response.Process(&id)
+ return &report, response.Process(&report)
}
// Pull is the binding for libpod's v2 endpoints for pulling images. Note that
@@ -283,3 +285,26 @@ func Pull(ctx context.Context, rawImage string, options entities.ImagePullOption
return pulledImages, nil
}
+
+// Push is the binding for libpod's v2 endpoints for pushing images. Note that
+// `source` must refer to an image in the remote's container storage.
+// The destination must be a reference to a registry (i.e., of docker transport
+// or be normalized to one). Other transports are rejected as they do not make
+// sense in a remote context.
+func Push(ctx context.Context, source string, destination string, options entities.ImagePushOptions) error {
+ conn, err := bindings.GetClient(ctx)
+ if err != nil {
+ return err
+ }
+ params := url.Values{}
+ params.Set("credentials", options.Credentials)
+ params.Set("destination", destination)
+ if options.TLSVerify != types.OptionalBoolUndefined {
+ val := bool(options.TLSVerify == types.OptionalBoolTrue)
+ params.Set("tlsVerify", strconv.FormatBool(val))
+ }
+
+ path := fmt.Sprintf("/images/%s/push", source)
+ _, err = conn.DoRequest(nil, http.MethodPost, path, params)
+ return err
+}
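
And a small sketch of the new Push binding; connCtx is again an assumed bindings connection context, and the credential and reference values are placeholders:

package client

import (
	"context"

	"github.com/containers/image/v5/types"
	"github.com/containers/libpod/pkg/bindings/images"
	"github.com/containers/libpod/pkg/domain/entities"
)

// pushImage pushes an image from the remote's container storage to a registry,
// skipping TLS verification only when the caller explicitly asks for it.
func pushImage(connCtx context.Context, source, destination, credentials string, insecure bool) error {
	opts := entities.ImagePushOptions{
		Credentials: credentials, // "username:password", may be empty
		TLSVerify:   types.OptionalBoolTrue,
	}
	if insecure {
		opts.TLSVerify = types.OptionalBoolFalse
	}
	return images.Push(connCtx, source, destination, opts)
}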
diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go
index dc01a793b..992720196 100644
--- a/pkg/bindings/test/images_test.go
+++ b/pkg/bindings/test/images_test.go
@@ -219,7 +219,7 @@ var _ = Describe("Podman images", func() {
Expect(err).To(BeNil())
names, err := images.Load(bt.conn, f, nil)
Expect(err).To(BeNil())
- Expect(names).To(Equal(alpine.name))
+ Expect(names.Name).To(Equal(alpine.name))
exists, err = images.Exists(bt.conn, alpine.name)
Expect(err).To(BeNil())
Expect(exists).To(BeTrue())
@@ -235,7 +235,7 @@ var _ = Describe("Podman images", func() {
newName := "quay.io/newname:fizzle"
names, err = images.Load(bt.conn, f, &newName)
Expect(err).To(BeNil())
- Expect(names).To(Equal(alpine.name))
+ Expect(names.Name).To(Equal(alpine.name))
exists, err = images.Exists(bt.conn, newName)
Expect(err).To(BeNil())
Expect(exists).To(BeTrue())
diff --git a/pkg/adapter/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index a5b74013b..78f592d32 100644
--- a/pkg/adapter/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -1,6 +1,4 @@
-// +build !remoteclient
-
-package adapter
+package checkpoint
import (
"context"
@@ -42,9 +40,9 @@ func crImportFromJSON(filePath string, v interface{}) error {
return nil
}
-// crImportCheckpoint it the function which imports the information
+// CRImportCheckpoint is the function which imports the information
// from checkpoint tarball and re-creates the container from that information
-func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input string, name string) ([]*libpod.Container, error) {
+func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input string, name string) ([]*libpod.Container, error) {
// First get the container definition from the
// tarball to a temporary directory
archiveFile, err := os.Open(input)
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index b7f1cd812..cf907eb1b 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -2,6 +2,7 @@ package entities
import (
"io"
+ "os"
"time"
"github.com/containers/libpod/libpod/define"
@@ -117,3 +118,71 @@ type CommitOptions struct {
type CommitReport struct {
Id string
}
+
+type ContainerExportOptions struct {
+ Output string
+}
+
+type CheckpointOptions struct {
+ All bool
+ Export string
+ IgnoreRootFS bool
+ Keep bool
+ Latest bool
+ LeaveRunning bool
+ TCPEstablished bool
+}
+
+type CheckpointReport struct {
+ Err error
+ Id string
+}
+
+type RestoreOptions struct {
+ All bool
+ IgnoreRootFS bool
+ IgnoreStaticIP bool
+ IgnoreStaticMAC bool
+ Import string
+ Keep bool
+ Latest bool
+ Name string
+ TCPEstablished bool
+}
+
+type RestoreReport struct {
+ Err error
+ Id string
+}
+
+type ContainerCreateReport struct {
+ Id string
+}
+
+// AttachOptions describes the cli and other values
+// needed to perform an attach
+type AttachOptions struct {
+ DetachKeys string
+ Latest bool
+ NoStdin bool
+ SigProxy bool
+ Stdin *os.File
+ Stdout *os.File
+ Stderr *os.File
+}
+
+// ExecOptions describes the cli values to exec into
+// a container
+type ExecOptions struct {
+ Cmd []string
+ DetachKeys string
+ Envs map[string]string
+ Interactive bool
+ Latest bool
+ PreserveFDs uint
+ Privileged bool
+ Streams define.AttachStreams
+ Tty bool
+ User string
+ WorkDir string
+}
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index ce0c161e9..9bf3d51de 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -4,12 +4,19 @@ import (
"context"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/specgen"
)
type ContainerEngine interface {
+ ContainerAttach(ctx context.Context, nameOrId string, options AttachOptions) error
ContainerCommit(ctx context.Context, nameOrId string, options CommitOptions) (*CommitReport, error)
+ ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)
+ ContainerRestore(ctx context.Context, namesOrIds []string, options RestoreOptions) ([]*RestoreReport, error)
+ ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*ContainerCreateReport, error)
+ ContainerExec(ctx context.Context, nameOrId string, options ExecOptions) (int, error)
ContainerExists(ctx context.Context, nameOrId string) (*BoolReport, error)
ContainerInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]*ContainerInspectReport, error)
+ ContainerExport(ctx context.Context, nameOrId string, options ContainerExportOptions) error
ContainerKill(ctx context.Context, namesOrIds []string, options KillOptions) ([]*KillReport, error)
ContainerPause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error)
ContainerRestart(ctx context.Context, namesOrIds []string, options RestartOptions) ([]*RestartReport, error)
diff --git a/pkg/domain/entities/engine_image.go b/pkg/domain/entities/engine_image.go
index 2ca48e795..a28bfc548 100644
--- a/pkg/domain/entities/engine_image.go
+++ b/pkg/domain/entities/engine_image.go
@@ -14,4 +14,8 @@ type ImageEngine interface {
Pull(ctx context.Context, rawImage string, opts ImagePullOptions) (*ImagePullReport, error)
Tag(ctx context.Context, nameOrId string, tags []string, options ImageTagOptions) error
Untag(ctx context.Context, nameOrId string, tags []string, options ImageUntagOptions) error
+ Load(ctx context.Context, opts ImageLoadOptions) (*ImageLoadReport, error)
+ Import(ctx context.Context, opts ImageImportOptions) (*ImageImportReport, error)
+ Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error
+ Save(ctx context.Context, nameOrId string, tags []string, options ImageSaveOptions) error
}
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 20682b05b..bc8a34c13 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -144,6 +144,43 @@ type ImagePullReport struct {
Images []string
}
+// ImagePushOptions are the arguments for pushing images.
+type ImagePushOptions struct {
+ // Authfile is the path to the authentication file. Ignored for remote
+ // calls.
+ Authfile string
+ // CertDir is the path to certificate directories. Ignored for remote
+ // calls.
+ CertDir string
+ // Compress tarball image layers when pushing to a directory using the 'dir'
+ // transport. Default is same compression type as source. Ignored for remote
+ // calls.
+ Compress bool
+ // Credentials for authenticating against the registry in the format
+ // USERNAME:PASSWORD.
+ Credentials string
+ // DigestFile, after copying the image, write the digest of the resulting
+ // image to the file. Ignored for remote calls.
+ DigestFile string
+ // Format is the Manifest type (oci, v2s1, or v2s2) to use when pushing an
+ // image using the 'dir' transport. Default is manifest type of source.
+ // Ignored for remote calls.
+ Format string
+ // Quiet can be specified to suppress push progress when pushing. Ignored
+ // for remote calls.
+ Quiet bool
+ // RemoveSignatures, discard any pre-existing signatures in the image.
+ // Ignored for remote calls.
+ RemoveSignatures bool
+ // SignaturePolicy to use when pushing. Ignored for remote calls.
+ SignaturePolicy string
+ // SignBy adds a signature at the destination using the specified key.
+ // Ignored for remote calls.
+ SignBy string
+ // TLSVerify to enable/disable HTTPS and certificate verification.
+ TLSVerify types.OptionalBool
+}
+
type ImageListOptions struct {
All bool `json:"all" schema:"all"`
Filter []string `json:"Filter,omitempty"`
@@ -172,3 +209,35 @@ type ImageInspectReport struct {
Images []*ImageData
Errors map[string]error
}
+
+type ImageLoadOptions struct {
+ Name string
+ Tag string
+ Input string
+ Quiet bool
+ SignaturePolicy string
+}
+
+type ImageLoadReport struct {
+ Name string
+}
+
+type ImageImportOptions struct {
+ Changes []string
+ Message string
+ Quiet bool
+ Reference string
+ Source string
+ SourceIsURL bool
+}
+
+type ImageImportReport struct {
+ Id string
+}
+
+type ImageSaveOptions struct {
+ Compress bool
+ Format string
+ Output string
+ Quiet bool
+}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 172c7d1a3..b93e7665a 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -5,6 +5,7 @@ package abi
import (
"context"
"io/ioutil"
+ "strconv"
"strings"
"github.com/containers/buildah"
@@ -12,13 +13,45 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
- "github.com/containers/libpod/pkg/adapter/shortcuts"
+ "github.com/containers/libpod/pkg/checkpoint"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi/terminal"
"github.com/containers/libpod/pkg/signal"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/containers/libpod/pkg/specgen/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+// getContainersByContext gets containers whether all, latest, or a slice of names/ids
+// is specified.
+func getContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
+ var ctr *libpod.Container
+ ctrs = []*libpod.Container{}
+
+ switch {
+ case all:
+ ctrs, err = runtime.GetAllContainers()
+ case latest:
+ ctr, err = runtime.GetLatestContainer()
+ ctrs = append(ctrs, ctr)
+ default:
+ for _, n := range names {
+ ctr, e := runtime.LookupContainer(n)
+ if e != nil {
+ // Log all errors here, so callers don't need to.
+ logrus.Debugf("Error looking up container %q: %v", n, e)
+ if err == nil {
+ err = e
+ }
+ } else {
+ ctrs = append(ctrs, ctr)
+ }
+ }
+ }
+ return
+}
+
// TODO: Should return *entities.ContainerExistsReport, error
func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) {
_, err := ic.Libpod.LookupContainer(nameOrId)
@@ -32,7 +65,7 @@ func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []strin
var (
responses []entities.WaitReport
)
- ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
@@ -58,7 +91,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
if options.All {
ctrs, err = ic.Libpod.GetAllContainers()
} else {
- ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ ctrs, err = getContainersByContext(false, false, namesOrIds, ic.Libpod)
}
if err != nil {
return nil, err
@@ -79,7 +112,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
if options.All {
ctrs, err = ic.Libpod.GetAllContainers()
} else {
- ctrs, err = shortcuts.GetContainersByContext(false, false, namesOrIds, ic.Libpod)
+ ctrs, err = getContainersByContext(false, false, namesOrIds, ic.Libpod)
}
if err != nil {
return nil, err
@@ -103,7 +136,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
id := strings.Split(string(content), "\n")[0]
names = append(names, id)
}
- ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
return nil, err
}
@@ -139,7 +172,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
if err != nil {
return nil, err
}
- ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
@@ -155,7 +188,7 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
var (
reports []*entities.RestartReport
)
- ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
@@ -197,7 +230,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
names = append(names, id)
}
- ctrs, err := shortcuts.GetContainersByContext(options.All, options.Latest, names, ic.Libpod)
+ ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
// Failed to get containers. If force is specified, get the containers ID
// and evict them
@@ -245,7 +278,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, error) {
var reports []*entities.ContainerInspectReport
- ctrs, err := shortcuts.GetContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
@@ -325,3 +358,154 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string,
}
return &entities.CommitReport{Id: newImage.ID()}, nil
}
+
+func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrId string, options entities.ContainerExportOptions) error {
+ ctr, err := ic.Libpod.LookupContainer(nameOrId)
+ if err != nil {
+ return err
+ }
+ return ctr.Export(options.Output)
+}
+
+func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) {
+ var (
+ err error
+ cons []*libpod.Container
+ reports []*entities.CheckpointReport
+ )
+ checkOpts := libpod.ContainerCheckpointOptions{
+ Keep: options.Keep,
+ TCPEstablished: options.TCPEstablished,
+ TargetFile: options.Export,
+ IgnoreRootfs: options.IgnoreRootFS,
+ }
+
+ if options.All {
+ running := func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == define.ContainerStateRunning
+ }
+ cons, err = ic.Libpod.GetContainers(running)
+ } else {
+ cons, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range cons {
+ err = con.Checkpoint(ctx, checkOpts)
+ reports = append(reports, &entities.CheckpointReport{
+ Err: err,
+ Id: con.ID(),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []string, options entities.RestoreOptions) ([]*entities.RestoreReport, error) {
+ var (
+ cons []*libpod.Container
+ err error
+ filterFuncs []libpod.ContainerFilter
+ reports []*entities.RestoreReport
+ )
+
+ restoreOptions := libpod.ContainerCheckpointOptions{
+ Keep: options.Keep,
+ TCPEstablished: options.TCPEstablished,
+ TargetFile: options.Import,
+ Name: options.Name,
+ IgnoreRootfs: options.IgnoreRootFS,
+ IgnoreStaticIP: options.IgnoreStaticIP,
+ IgnoreStaticMAC: options.IgnoreStaticMAC,
+ }
+
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == define.ContainerStateExited
+ })
+
+ switch {
+ case options.Import != "":
+ cons, err = checkpoint.CRImportCheckpoint(ctx, ic.Libpod, options.Import, options.Name)
+ case options.All:
+ cons, err = ic.Libpod.GetContainers(filterFuncs...)
+ default:
+ cons, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, con := range cons {
+ err := con.Restore(ctx, restoreOptions)
+ reports = append(reports, &entities.RestoreReport{
+ Err: err,
+ Id: con.ID(),
+ })
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*entities.ContainerCreateReport, error) {
+ if err := generate.CompleteSpec(ctx, ic.Libpod, s); err != nil {
+ return nil, err
+ }
+ ctr, err := generate.MakeContainer(ic.Libpod, s)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.ContainerCreateReport{Id: ctr.ID()}, nil
+}
+
+func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error {
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+ if err != nil {
+ return err
+ }
+ ctr := ctrs[0]
+ conState, err := ctr.State()
+ if err != nil {
+ return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
+ }
+ if conState != define.ContainerStateRunning {
+ return errors.Errorf("you can only attach to running containers")
+ }
+
+ // If the container is in a pod, also set to recursively start dependencies
+	if err := terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach {
+ return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+ }
+ return nil
+}
+
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) {
+ ec := define.ExecErrorCodeGeneric
+ if options.PreserveFDs > 0 {
+ entries, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return ec, errors.Wrapf(err, "unable to read /proc/self/fd")
+ }
+
+ m := make(map[int]bool)
+ for _, e := range entries {
+ i, err := strconv.Atoi(e.Name())
+ if err != nil {
+ return ec, errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ }
+ m[i] = true
+ }
+
+ for i := 3; i < 3+int(options.PreserveFDs); i++ {
+ if _, found := m[i]; !found {
+ return ec, errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
+ }
+ }
+ }
+ ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+ if err != nil {
+ return ec, err
+ }
+ ctr := ctrs[0]
+ ec, err = terminal.ExecAttachCtr(ctx, ctr, options.Tty, options.Privileged, options.Envs, options.Cmd, options.User, options.WorkDir, &options.Streams, options.PreserveFDs, options.DetachKeys)
+ return define.TranslateExecErrorToExitCode(ec, err), err
+}
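
As a usage sketch only (the engine value and container name are assumptions; entities.ContainerEngine is the interface these methods implement), the checkpoint entrypoint above could be driven like this:

    package example

    import (
        "context"
        "fmt"

        "github.com/containers/libpod/pkg/domain/entities"
    )

    // checkpointOne is a hypothetical caller; engine is assumed to be an
    // initialized engine such as the ABI ContainerEngine above.
    func checkpointOne(engine entities.ContainerEngine, name string) error {
        reports, err := engine.ContainerCheckpoint(context.Background(), []string{name}, entities.CheckpointOptions{
            Keep:           true,
            TCPEstablished: true,
        })
        if err != nil {
            return err
        }
        for _, r := range reports {
            if r.Err != nil {
                fmt.Printf("%s: %v\n", r.Id, r.Err)
                continue
            }
            fmt.Println(r.Id)
        }
        return nil
    }
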
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 5a7acb2f7..9d706a112 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/docker"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod/image"
@@ -20,6 +21,7 @@ import (
domainUtils "github.com/containers/libpod/pkg/domain/utils"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -260,6 +262,64 @@ func (ir *ImageEngine) Inspect(ctx context.Context, names []string, opts entitie
return &report, nil
}
+func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, options entities.ImagePushOptions) error {
+ var writer io.Writer
+ if !options.Quiet {
+ writer = os.Stderr
+ }
+
+ var manifestType string
+ switch options.Format {
+ case "":
+ // Default
+ case "oci":
+ manifestType = imgspecv1.MediaTypeImageManifest
+ case "v2s1":
+ manifestType = manifest.DockerV2Schema1SignedMediaType
+ case "v2s2", "docker":
+ manifestType = manifest.DockerV2Schema2MediaType
+ default:
+		return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", options.Format)
+ }
+
+ var registryCreds *types.DockerAuthConfig
+ if options.Credentials != "" {
+ creds, err := util.ParseRegistryCreds(options.Credentials)
+ if err != nil {
+ return err
+ }
+ registryCreds = creds
+ }
+ dockerRegistryOptions := image.DockerRegistryOptions{
+ DockerRegistryCreds: registryCreds,
+ DockerCertPath: options.CertDir,
+ DockerInsecureSkipTLSVerify: options.TLSVerify,
+ }
+
+ signOptions := image.SigningOptions{
+ RemoveSignatures: options.RemoveSignatures,
+ SignBy: options.SignBy,
+ }
+
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(source)
+ if err != nil {
+ return err
+ }
+
+ return newImage.PushImageToHeuristicDestination(
+ ctx,
+ destination,
+ manifestType,
+ options.Authfile,
+ options.DigestFile,
+ options.SignaturePolicy,
+ writer,
+ options.Compress,
+ signOptions,
+ &dockerRegistryOptions,
+ nil)
+}
+
// func (r *imageRuntime) Delete(ctx context.Context, nameOrId string, opts entities.ImageDeleteOptions) (*entities.ImageDeleteReport, error) {
// image, err := r.libpod.ImageEngine().NewFromLocal(nameOrId)
// if err != nil {
@@ -303,6 +363,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string,
}
return nil
}
+
func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error {
newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
if err != nil {
@@ -315,3 +376,40 @@ func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string
}
return nil
}
+
+func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) (*entities.ImageLoadReport, error) {
+ var (
+ writer io.Writer
+ )
+ if !opts.Quiet {
+ writer = os.Stderr
+ }
+ name, err := ir.Libpod.LoadImage(ctx, opts.Name, opts.Input, writer, opts.SignaturePolicy)
+ if err != nil {
+ return nil, err
+ }
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(name)
+ if err != nil {
+ return nil, errors.Wrap(err, "image loaded but no additional tags were created")
+ }
+ if err := newImage.TagImage(opts.Name); err != nil {
+ return nil, errors.Wrapf(err, "error adding %q to image %q", opts.Name, newImage.InputName)
+ }
+ return &entities.ImageLoadReport{Name: name}, nil
+}
+
+func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) {
+ id, err := ir.Libpod.Import(ctx, opts.Source, opts.Reference, opts.Changes, opts.Message, opts.Quiet)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.ImageImportReport{Id: id}, nil
+}
+
+func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string, options entities.ImageSaveOptions) error {
+ newImage, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId)
+ if err != nil {
+ return err
+ }
+ return newImage.Save(ctx, nameOrId, options.Format, options.Output, tags, options.Quiet, options.Compress)
+}
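
A hedged sketch of the caller side of Push, using the format strings handled by the switch above; the image name and registry are made up:

    package example

    import (
        "context"

        "github.com/containers/image/v5/types"
        "github.com/containers/libpod/pkg/domain/entities"
    )

    // pushExample is illustrative only; imageEngine is assumed to be an
    // initialized entities.ImageEngine such as the ABI ImageEngine above.
    func pushExample(ctx context.Context, imageEngine entities.ImageEngine) error {
        opts := entities.ImagePushOptions{
            Format:    "v2s2", // also accepted: "", "oci", "v2s1", "docker"
            TLSVerify: types.OptionalBoolTrue,
            Quiet:     true,
        }
        return imageEngine.Push(ctx, "localhost/myimage:latest", "quay.io/example/myimage:latest", opts)
    }
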
diff --git a/pkg/domain/infra/abi/terminal/sigproxy_linux.go b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
new file mode 100644
index 000000000..d7f5853d8
--- /dev/null
+++ b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
@@ -0,0 +1,47 @@
+// +build ABISupport
+
+package terminal
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/sirupsen/logrus"
+)
+
+// ProxySignals forwards all caught signals (except SIGCHLD, SIGPIPE, and SIGURG) to the given container.
+func ProxySignals(ctr *libpod.Container) {
+ sigBuffer := make(chan os.Signal, 128)
+ signal.CatchAll(sigBuffer)
+
+ logrus.Debugf("Enabling signal proxying")
+
+ go func() {
+ for s := range sigBuffer {
+			// Ignore SIGCHLD and SIGPIPE - these are most likely
+ // intended for the podman command itself.
+ // SIGURG was added because of golang 1.14 and its preemptive changes
+ // causing more signals to "show up".
+ // https://github.com/containers/libpod/issues/5483
+ if s == syscall.SIGCHLD || s == syscall.SIGPIPE || s == syscall.SIGURG {
+ continue
+ }
+
+ if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
+ // If the container dies, and we find out here,
+ // we need to forward that one signal to
+ // ourselves so that it is not lost, and then
+ // we terminate the proxy and let the defaults
+ // play out.
+ logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err)
+ signal.StopCatch(sigBuffer)
+ if err := syscall.Kill(syscall.Getpid(), s.(syscall.Signal)); err != nil {
+ logrus.Errorf("failed to kill pid %d", syscall.Getpid())
+ }
+ return
+ }
+ }
+ }()
+}
diff --git a/pkg/domain/infra/abi/terminal/terminal.go b/pkg/domain/infra/abi/terminal/terminal.go
new file mode 100644
index 000000000..f187bdd6b
--- /dev/null
+++ b/pkg/domain/infra/abi/terminal/terminal.go
@@ -0,0 +1,103 @@
+// +build ABISupport
+
+package terminal
+
+import (
+ "context"
+ "os"
+ "os/signal"
+
+ lsignal "github.com/containers/libpod/pkg/signal"
+ "github.com/docker/docker/pkg/term"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+// RawTtyFormatter is a logrus formatter used while the terminal is in raw mode.
+type RawTtyFormatter struct {
+}
+
+// getResize returns a TerminalSize matching stdin's current
+// size on success, and nil on errors.
+func getResize() *remotecommand.TerminalSize {
+ winsize, err := term.GetWinsize(os.Stdin.Fd())
+ if err != nil {
+ logrus.Warnf("Could not get terminal size %v", err)
+ return nil
+ }
+ return &remotecommand.TerminalSize{
+ Width: winsize.Width,
+ Height: winsize.Height,
+ }
+}
+
+// Helper for handleTerminalAttach - set up a goroutine to generate terminal resize events
+func resizeTty(ctx context.Context, resize chan remotecommand.TerminalSize) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, lsignal.SIGWINCH)
+ go func() {
+ defer close(resize)
+ // Update the terminal size immediately without waiting
+ // for a SIGWINCH to get the correct initial size.
+ resizeEvent := getResize()
+ for {
+ if resizeEvent == nil {
+ select {
+ case <-ctx.Done():
+ return
+ case <-sigchan:
+ resizeEvent = getResize()
+ }
+ } else {
+ select {
+ case <-ctx.Done():
+ return
+ case <-sigchan:
+ resizeEvent = getResize()
+ case resize <- *resizeEvent:
+ resizeEvent = nil
+ }
+ }
+ }
+ }()
+}
+
+func restoreTerminal(state *term.State) error {
+ logrus.SetFormatter(&logrus.TextFormatter{})
+ return term.RestoreTerminal(os.Stdin.Fd(), state)
+}
+
+// Format appends a carriage return to each formatted log entry so output renders correctly on a raw terminal.
+func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ textFormatter := logrus.TextFormatter{}
+ bytes, err := textFormatter.Format(entry)
+
+ if err == nil {
+ bytes = append(bytes, '\r')
+ }
+
+ return bytes, err
+}
+
+func handleTerminalAttach(ctx context.Context, resize chan remotecommand.TerminalSize) (context.CancelFunc, *term.State, error) {
+ logrus.Debugf("Handling terminal attach")
+
+ subCtx, cancel := context.WithCancel(ctx)
+
+ resizeTty(subCtx, resize)
+
+ oldTermState, err := term.SaveState(os.Stdin.Fd())
+ if err != nil {
+ // allow caller to not have to do any cleaning up if we error here
+ cancel()
+ return nil, nil, errors.Wrapf(err, "unable to save terminal state")
+ }
+
+ logrus.SetFormatter(&RawTtyFormatter{})
+ if _, err := term.SetRawTerminal(os.Stdin.Fd()); err != nil {
+ return cancel, nil, err
+ }
+
+ return cancel, oldTermState, nil
+}
diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go
new file mode 100644
index 000000000..664205df1
--- /dev/null
+++ b/pkg/domain/infra/abi/terminal/terminal_linux.go
@@ -0,0 +1,123 @@
+// +build ABISupport
+
+package terminal
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh/terminal"
+ "k8s.io/client-go/tools/remotecommand"
+)
+
+// ExecAttachCtr execs and attaches to a container
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
+ resize := make(chan remotecommand.TerminalSize)
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && tty {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ if err != nil {
+ return -1, err
+ }
+ defer cancel()
+ defer func() {
+ if err := restoreTerminal(oldTermState); err != nil {
+ logrus.Errorf("unable to restore terminal: %q", err)
+ }
+ }()
+ }
+
+ execConfig := new(libpod.ExecConfig)
+ execConfig.Command = cmd
+ execConfig.Terminal = tty
+ execConfig.Privileged = privileged
+ execConfig.Environment = env
+ execConfig.User = user
+ execConfig.WorkDir = workDir
+ execConfig.DetachKeys = &detachKeys
+ execConfig.PreserveFDs = preserveFDs
+
+ return ctr.Exec(execConfig, streams, resize)
+}
+
+// StartAttachCtr starts and (if required) attaches to a container.
+// If you change the signature of this function from os.File to io.Writer, it will trigger a downstream
+// error. We may need to just lint-disable this one.
+func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer
+ resize := make(chan remotecommand.TerminalSize)
+
+ haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
+
+ // Check if we are attached to a terminal. If we are, generate resize
+ // events, and set the terminal to raw mode
+ if haveTerminal && ctr.Spec().Process.Terminal {
+ cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := restoreTerminal(oldTermState); err != nil {
+ logrus.Errorf("unable to restore terminal: %q", err)
+ }
+ }()
+ defer cancel()
+ }
+
+ streams := new(define.AttachStreams)
+ streams.OutputStream = stdout
+ streams.ErrorStream = stderr
+ streams.InputStream = bufio.NewReader(stdin)
+ streams.AttachOutput = true
+ streams.AttachError = true
+ streams.AttachInput = true
+
+ if stdout == nil {
+ logrus.Debugf("Not attaching to stdout")
+ streams.AttachOutput = false
+ }
+ if stderr == nil {
+ logrus.Debugf("Not attaching to stderr")
+ streams.AttachError = false
+ }
+ if stdin == nil {
+ logrus.Debugf("Not attaching to stdin")
+ streams.AttachInput = false
+ }
+
+ if !startContainer {
+ if sigProxy {
+ ProxySignals(ctr)
+ }
+
+ return ctr.Attach(streams, detachKeys, resize)
+ }
+
+ attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive)
+ if err != nil {
+ return err
+ }
+
+ if sigProxy {
+ ProxySignals(ctr)
+ }
+
+ if stdout == nil && stderr == nil {
+ fmt.Printf("%s\n", ctr.ID())
+ }
+
+ err = <-attachChan
+ if err != nil {
+ return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
+ }
+
+ return nil
+}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index c1bade4ba..1e71cba2c 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -2,11 +2,15 @@ package tunnel
import (
"context"
+ "io"
+ "os"
"github.com/containers/image/v5/docker/reference"
-
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
"github.com/containers/libpod/pkg/bindings/containers"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/specgen"
"github.com/pkg/errors"
)
@@ -210,3 +214,102 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrId string,
}
return &entities.CommitReport{Id: response.ID}, nil
}
+
+func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrId string, options entities.ContainerExportOptions) error {
+ var (
+ err error
+ w io.Writer
+ )
+ if len(options.Output) > 0 {
+ w, err = os.Create(options.Output)
+ if err != nil {
+ return err
+ }
+ }
+ return containers.Export(ic.ClientCxt, nameOrId, w)
+}
+
+func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) {
+ var (
+ reports []*entities.CheckpointReport
+ err error
+ ctrs []libpod.ListContainer
+ )
+
+ if options.All {
+ allCtrs, err := getContainersByContext(ic.ClientCxt, true, []string{})
+ if err != nil {
+ return nil, err
+ }
+ // narrow the list to running only
+ for _, c := range allCtrs {
+ if c.State == define.ContainerStateRunning.String() {
+ ctrs = append(ctrs, c)
+ }
+ }
+
+ } else {
+ ctrs, err = getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, c := range ctrs {
+ report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRuninng, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export)
+		if err != nil {
+			reports = append(reports, &entities.CheckpointReport{Id: c.ID, Err: err})
+			continue
+		}
+		reports = append(reports, report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []string, options entities.RestoreOptions) ([]*entities.RestoreReport, error) {
+ var (
+ reports []*entities.RestoreReport
+ err error
+ ctrs []libpod.ListContainer
+ )
+ if options.All {
+ allCtrs, err := getContainersByContext(ic.ClientCxt, true, []string{})
+ if err != nil {
+ return nil, err
+ }
+ // narrow the list to exited only
+ for _, c := range allCtrs {
+ if c.State == define.ContainerStateExited.String() {
+ ctrs = append(ctrs, c)
+ }
+ }
+
+ } else {
+ ctrs, err = getContainersByContext(ic.ClientCxt, false, namesOrIds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, c := range ctrs {
+ report, err := containers.Restore(ic.ClientCxt, c.ID, &options.Keep, &options.TCPEstablished, &options.IgnoreRootFS, &options.IgnoreStaticIP, &options.IgnoreStaticMAC, &options.Name, &options.Import)
+		if err != nil {
+			reports = append(reports, &entities.RestoreReport{Id: c.ID, Err: err})
+			continue
+		}
+		reports = append(reports, report)
+ }
+ return reports, nil
+}
+
+func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*entities.ContainerCreateReport, error) {
+ response, err := containers.CreateWithSpec(ic.ClientCxt, s)
+ if err != nil {
+ return nil, err
+ }
+ return &entities.ContainerCreateReport{Id: response.ID}, nil
+}
+
+func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error {
+ return errors.New("not implemented")
+}
+
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) {
+ return 125, errors.New("not implemented")
+}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 6a8b9be37..516914a68 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -2,11 +2,14 @@ package tunnel
import (
"context"
+ "io/ioutil"
+ "os"
"github.com/containers/image/v5/docker/reference"
images "github.com/containers/libpod/pkg/bindings/images"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/domain/utils"
+ utils2 "github.com/containers/libpod/utils"
"github.com/pkg/errors"
)
@@ -157,3 +160,84 @@ func (ir *ImageEngine) Inspect(_ context.Context, names []string, opts entities.
}
return &report, nil
}
+
+func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions) (*entities.ImageLoadReport, error) {
+ f, err := os.Open(opts.Input)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return images.Load(ir.ClientCxt, f, &opts.Name)
+}
+
+func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) {
+ var (
+ err error
+ sourceURL *string
+ f *os.File
+ )
+ if opts.SourceIsURL {
+ sourceURL = &opts.Source
+ } else {
+ f, err = os.Open(opts.Source)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return images.Import(ir.ClientCxt, opts.Changes, &opts.Message, &opts.Reference, sourceURL, f)
+}
+
+func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, options entities.ImagePushOptions) error {
+ return images.Push(ir.ClientCxt, source, destination, options)
+}
+
+func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string, options entities.ImageSaveOptions) error {
+ var (
+ f *os.File
+ err error
+ )
+
+ switch options.Format {
+ case "oci-dir", "docker-dir":
+ f, err = ioutil.TempFile("", "podman_save")
+ if err == nil {
+ defer func() { _ = os.Remove(f.Name()) }()
+ }
+ default:
+ f, err = os.Create(options.Output)
+ }
+ if err != nil {
+ return err
+ }
+
+ exErr := images.Export(ir.ClientCxt, nameOrId, f, &options.Format, &options.Compress)
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if exErr != nil {
+ return exErr
+ }
+
+ if options.Format != "oci-dir" && options.Format != "docker-dir" {
+ return nil
+ }
+
+ f, err = os.Open(f.Name())
+ if err != nil {
+ return err
+ }
+ info, err := os.Stat(options.Output)
+ switch {
+ case err == nil:
+ if info.Mode().IsRegular() {
+ return errors.Errorf("%q already exists as a regular file", options.Output)
+ }
+ case os.IsNotExist(err):
+ if err := os.Mkdir(options.Output, 0755); err != nil {
+ return err
+ }
+ default:
+ return err
+ }
+ return utils2.UntarToFileSystem(options.Output, f, nil)
+}
diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go
new file mode 100644
index 000000000..5bafef1fe
--- /dev/null
+++ b/pkg/domain/infra/tunnel/system.go
@@ -0,0 +1 @@
+package tunnel
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index d4fd5976f..5de07fc28 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -316,7 +316,17 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
// Make sure to always set the default variables unless overridden in the
// config.
- config.Env = env.Join(env.DefaultEnvVariables, config.Env)
+ var defaultEnv map[string]string
+ if runtimeConfig == nil {
+ defaultEnv = env.DefaultEnvVariables
+ } else {
+ defaultEnv, err = env.ParseSlice(runtimeConfig.Containers.Env)
+ if err != nil {
+			return nil, errors.Wrap(err, "Env fields in containers.conf failed to parse")
+ }
+ defaultEnv = env.Join(env.DefaultEnvVariables, defaultEnv)
+ }
+ config.Env = env.Join(defaultEnv, config.Env)
for name, val := range config.Env {
g.AddProcessEnv(name, val)
}
@@ -371,11 +381,9 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
// BIND MOUNTS
configSpec.Mounts = SupercedeUserMounts(userMounts, configSpec.Mounts)
// Process mounts to ensure correct options
- finalMounts, err := InitFSMounts(configSpec.Mounts)
- if err != nil {
+ if err := InitFSMounts(configSpec.Mounts); err != nil {
return nil, err
}
- configSpec.Mounts = finalMounts
// BLOCK IO
blkio, err := config.CreateBlockIO()
diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go
index b0687b4c2..68a84d638 100644
--- a/pkg/spec/storage.go
+++ b/pkg/spec/storage.go
@@ -10,7 +10,6 @@ import (
"github.com/containers/buildah/pkg/parse"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/util"
- pmount "github.com/containers/storage/pkg/mount"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -855,75 +854,22 @@ func SupercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M
}
// Ensure mount options on all mounts are correct
-func InitFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) {
- // We need to look up mounts so we can figure out the proper mount flags
- // to apply.
- systemMounts, err := pmount.GetMounts()
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving system mounts to look up mount options")
- }
-
- // TODO: We probably don't need to re-build the mounts array
- var mounts []spec.Mount
- for _, m := range inputMounts {
- if m.Type == TypeBind {
- baseMnt, err := findMount(m.Destination, systemMounts)
+func InitFSMounts(mounts []spec.Mount) error {
+ for i, m := range mounts {
+ switch {
+ case m.Type == TypeBind:
+ opts, err := util.ProcessOptions(m.Options, false, m.Source)
if err != nil {
- return nil, errors.Wrapf(err, "error looking up mountpoint for mount %s", m.Destination)
- }
- var noexec, nosuid, nodev bool
- for _, baseOpt := range strings.Split(baseMnt.Opts, ",") {
- switch baseOpt {
- case "noexec":
- noexec = true
- case "nosuid":
- nosuid = true
- case "nodev":
- nodev = true
- }
+ return err
}
-
- defaultMountOpts := new(util.DefaultMountOptions)
- defaultMountOpts.Noexec = noexec
- defaultMountOpts.Nosuid = nosuid
- defaultMountOpts.Nodev = nodev
-
- opts, err := util.ProcessOptions(m.Options, false, defaultMountOpts)
+ mounts[i].Options = opts
+ case m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev":
+ opts, err := util.ProcessOptions(m.Options, true, "")
if err != nil {
- return nil, err
+ return err
}
- m.Options = opts
- }
- if m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev" {
- opts, err := util.ProcessOptions(m.Options, true, nil)
- if err != nil {
- return nil, err
- }
- m.Options = opts
- }
-
- mounts = append(mounts, m)
- }
- return mounts, nil
-}
-
-// TODO: We could make this a bit faster by building a tree of the mountpoints
-// and traversing it to identify the correct mount.
-func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) {
- var err error
- target, err = filepath.Abs(target)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot resolve %s", target)
- }
- var bestSoFar *pmount.Info
- for _, i := range mounts {
- if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) {
- // Won't be better than what we have already found
- continue
- }
- if strings.HasPrefix(target, i.Mountpoint) {
- bestSoFar = i
+ mounts[i].Options = opts
}
}
- return bestSoFar, nil
+ return nil
}
diff --git a/pkg/specgen/config_linux.go b/pkg/specgen/config_linux.go
new file mode 100644
index 000000000..82a371492
--- /dev/null
+++ b/pkg/specgen/config_linux.go
@@ -0,0 +1,93 @@
+package specgen
+
+//func createBlockIO() (*spec.LinuxBlockIO, error) {
+// var ret *spec.LinuxBlockIO
+// bio := &spec.LinuxBlockIO{}
+// if c.Resources.BlkioWeight > 0 {
+// ret = bio
+// bio.Weight = &c.Resources.BlkioWeight
+// }
+// if len(c.Resources.BlkioWeightDevice) > 0 {
+// var lwds []spec.LinuxWeightDevice
+// ret = bio
+// for _, i := range c.Resources.BlkioWeightDevice {
+// wd, err := ValidateweightDevice(i)
+// if err != nil {
+// return ret, errors.Wrapf(err, "invalid values for blkio-weight-device")
+// }
+// wdStat, err := GetStatFromPath(wd.Path)
+// if err != nil {
+// return ret, errors.Wrapf(err, "error getting stat from path %q", wd.Path)
+// }
+// lwd := spec.LinuxWeightDevice{
+// Weight: &wd.Weight,
+// }
+// lwd.Major = int64(unix.Major(wdStat.Rdev))
+// lwd.Minor = int64(unix.Minor(wdStat.Rdev))
+// lwds = append(lwds, lwd)
+// }
+// bio.WeightDevice = lwds
+// }
+// if len(c.Resources.DeviceReadBps) > 0 {
+// ret = bio
+// readBps, err := makeThrottleArray(c.Resources.DeviceReadBps, bps)
+// if err != nil {
+// return ret, err
+// }
+// bio.ThrottleReadBpsDevice = readBps
+// }
+// if len(c.Resources.DeviceWriteBps) > 0 {
+// ret = bio
+// writeBpds, err := makeThrottleArray(c.Resources.DeviceWriteBps, bps)
+// if err != nil {
+// return ret, err
+// }
+// bio.ThrottleWriteBpsDevice = writeBpds
+// }
+// if len(c.Resources.DeviceReadIOps) > 0 {
+// ret = bio
+// readIOps, err := makeThrottleArray(c.Resources.DeviceReadIOps, iops)
+// if err != nil {
+// return ret, err
+// }
+// bio.ThrottleReadIOPSDevice = readIOps
+// }
+// if len(c.Resources.DeviceWriteIOps) > 0 {
+// ret = bio
+// writeIOps, err := makeThrottleArray(c.Resources.DeviceWriteIOps, iops)
+// if err != nil {
+// return ret, err
+// }
+// bio.ThrottleWriteIOPSDevice = writeIOps
+// }
+// return ret, nil
+//}
+
+//func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrottleDevice, error) {
+// var (
+// ltds []spec.LinuxThrottleDevice
+// t *throttleDevice
+// err error
+// )
+// for _, i := range throttleInput {
+// if rateType == bps {
+// t, err = validateBpsDevice(i)
+// } else {
+// t, err = validateIOpsDevice(i)
+// }
+// if err != nil {
+// return []spec.LinuxThrottleDevice{}, err
+// }
+// ltdStat, err := GetStatFromPath(t.path)
+// if err != nil {
+// return ltds, errors.Wrapf(err, "error getting stat from path %q", t.path)
+// }
+// ltd := spec.LinuxThrottleDevice{
+// Rate: t.rate,
+// }
+// ltd.Major = int64(unix.Major(ltdStat.Rdev))
+// ltd.Minor = int64(unix.Minor(ltdStat.Rdev))
+// ltds = append(ltds, ltd)
+// }
+// return ltds, nil
+//}
diff --git a/pkg/specgen/config_linux_cgo.go b/pkg/specgen/config_linux_cgo.go
index 6f547a40d..ef6c6e951 100644
--- a/pkg/specgen/config_linux_cgo.go
+++ b/pkg/specgen/config_linux_cgo.go
@@ -17,7 +17,6 @@ import (
func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
var seccompConfig *spec.LinuxSeccomp
var err error
-
scp, err := seccomp.LookupPolicy(s.SeccompPolicy)
if err != nil {
return nil, err
diff --git a/pkg/specgen/container_validate.go b/pkg/specgen/container_validate.go
index b27659f5f..aad14ddcb 100644
--- a/pkg/specgen/container_validate.go
+++ b/pkg/specgen/container_validate.go
@@ -14,7 +14,7 @@ var (
// SystemDValues describes the only values that SystemD can be
SystemDValues = []string{"true", "false", "always"}
// ImageVolumeModeValues describes the only values that ImageVolumeMode can be
- ImageVolumeModeValues = []string{"ignore", "tmpfs", "anonymous"}
+ ImageVolumeModeValues = []string{"ignore", "tmpfs", "bind"}
)
func exclusiveOptions(opt1, opt2 string) error {
@@ -23,7 +23,7 @@ func exclusiveOptions(opt1, opt2 string) error {
// Validate verifies that the given SpecGenerator is valid and satisfies required
// input for creating a container.
-func (s *SpecGenerator) validate() error {
+func (s *SpecGenerator) Validate() error {
//
// ContainerBasicConfig
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
new file mode 100644
index 000000000..78c77fec1
--- /dev/null
+++ b/pkg/specgen/generate/container.go
@@ -0,0 +1,168 @@
+package generate
+
+import (
+ "context"
+
+ "github.com/containers/libpod/libpod"
+ ann "github.com/containers/libpod/pkg/annotations"
+ envLib "github.com/containers/libpod/pkg/env"
+ "github.com/containers/libpod/pkg/signal"
+ "github.com/containers/libpod/pkg/specgen"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) error {
+
+ newImage, err := r.ImageRuntime().NewFromLocal(s.Image)
+ if err != nil {
+ return err
+ }
+
+ // Image stop signal
+ if s.StopSignal == nil && newImage.Config != nil {
+ sig, err := signal.ParseSignalNameOrNumber(newImage.Config.StopSignal)
+ if err != nil {
+ return err
+ }
+ s.StopSignal = &sig
+ }
+ // Image envs from the image if they don't exist
+ // already
+ if newImage.Config != nil && len(newImage.Config.Env) > 0 {
+ envs, err := envLib.ParseSlice(newImage.Config.Env)
+ if err != nil {
+ return err
+ }
+ for k, v := range envs {
+ if _, exists := s.Env[k]; !exists {
+				s.Env[k] = v
+ }
+ }
+ }
+
+	// labels from the image that don't exist already
+ if config := newImage.Config; config != nil {
+ for k, v := range config.Labels {
+ if _, exists := s.Labels[k]; !exists {
+ s.Labels[k] = v
+ }
+ }
+ }
+
+ // annotations
+ // in the event this container is in a pod, and the pod has an infra container
+	// we will want to configure it as a type "container" instead of defaulting to
+ // the behavior of a "sandbox" container
+ // In Kata containers:
+ // - "sandbox" is the annotation that denotes the container should use its own
+ // VM, which is the default behavior
+ // - "container" denotes the container should join the VM of the SandboxID
+ // (the infra container)
+ s.Annotations = make(map[string]string)
+ if len(s.Pod) > 0 {
+ s.Annotations[ann.SandboxID] = s.Pod
+ s.Annotations[ann.ContainerType] = ann.ContainerTypeContainer
+ }
+ //
+ // Next, add annotations from the image
+ annotations, err := newImage.Annotations(ctx)
+ if err != nil {
+ return err
+ }
+ for k, v := range annotations {
+		s.Annotations[k] = v
+ }
+
+ // entrypoint
+ if config := newImage.Config; config != nil {
+ if len(s.Entrypoint) < 1 && len(config.Entrypoint) > 0 {
+ s.Entrypoint = config.Entrypoint
+ }
+ if len(s.Command) < 1 && len(config.Cmd) > 0 {
+ s.Command = config.Cmd
+ }
+ if len(s.Command) < 1 && len(s.Entrypoint) < 1 {
+			return errors.Errorf("no command provided, and no CMD or ENTRYPOINT found in this image")
+ }
+ // workdir
+ if len(s.WorkDir) < 1 && len(config.WorkingDir) > 1 {
+ s.WorkDir = config.WorkingDir
+ }
+ }
+
+ if len(s.SeccompProfilePath) < 1 {
+ p, err := libpod.DefaultSeccompPath()
+ if err != nil {
+ return err
+ }
+ s.SeccompProfilePath = p
+ }
+
+ if user := s.User; len(user) == 0 {
+ switch {
+ // TODO This should be enabled when namespaces actually work
+ //case usernsMode.IsKeepID():
+ // user = fmt.Sprintf("%d:%d", rootless.GetRootlessUID(), rootless.GetRootlessGID())
+ case newImage.Config == nil || (newImage.Config != nil && len(newImage.Config.User) == 0):
+ s.User = "0"
+ default:
+ s.User = newImage.Config.User
+ }
+ }
+ if err := finishThrottleDevices(s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// finishThrottleDevices takes the temporary representation of the throttle
+// devices in the specgen and looks up the major and minor device numbers. It then
+// sets the proper throttle devices in the specgen.
+func finishThrottleDevices(s *specgen.SpecGenerator) error {
+ if bps := s.ThrottleReadBpsDevice; len(bps) > 0 {
+ for k, v := range bps {
+ statT := unix.Stat_t{}
+ if err := unix.Stat(k, &statT); err != nil {
+ return err
+ }
+ v.Major = (int64(unix.Major(statT.Rdev)))
+ v.Minor = (int64(unix.Minor(statT.Rdev)))
+ s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v)
+ }
+ }
+ if bps := s.ThrottleWriteBpsDevice; len(bps) > 0 {
+ for k, v := range bps {
+ statT := unix.Stat_t{}
+ if err := unix.Stat(k, &statT); err != nil {
+ return err
+ }
+ v.Major = (int64(unix.Major(statT.Rdev)))
+ v.Minor = (int64(unix.Minor(statT.Rdev)))
+ s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v)
+ }
+ }
+ if iops := s.ThrottleReadIOPSDevice; len(iops) > 0 {
+ for k, v := range iops {
+ statT := unix.Stat_t{}
+ if err := unix.Stat(k, &statT); err != nil {
+ return err
+ }
+ v.Major = (int64(unix.Major(statT.Rdev)))
+ v.Minor = (int64(unix.Minor(statT.Rdev)))
+ s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v)
+ }
+ }
+	if iops := s.ThrottleWriteIOPSDevice; len(iops) > 0 {
+ for k, v := range iops {
+ statT := unix.Stat_t{}
+ if err := unix.Stat(k, &statT); err != nil {
+ return err
+ }
+ v.Major = (int64(unix.Major(statT.Rdev)))
+ v.Minor = (int64(unix.Minor(statT.Rdev)))
+ s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v)
+ }
+ }
+ return nil
+}
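
A minimal sketch of the caller-side contract for finishThrottleDevices, under the assumption of an initialized runtime and a locally available image: throttle devices are keyed by device path in the specgen, and CompleteSpec resolves them to major/minor numbers before appending them to ResourceLimits.BlockIO.

    package example

    import (
        "context"

        "github.com/containers/libpod/libpod"
        "github.com/containers/libpod/pkg/specgen"
        "github.com/containers/libpod/pkg/specgen/generate"
        spec "github.com/opencontainers/runtime-spec/specs-go"
    )

    // throttleExample is illustrative only; rt and s are assumed to be set up elsewhere.
    func throttleExample(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) error {
        // finishThrottleDevices appends into ResourceLimits.BlockIO, so guard the nested structs.
        if s.ResourceLimits == nil {
            s.ResourceLimits = &spec.LinuxResources{}
        }
        if s.ResourceLimits.BlockIO == nil {
            s.ResourceLimits.BlockIO = &spec.LinuxBlockIO{}
        }
        // Limit reads on /dev/sda to 1 MiB/s; Major and Minor are left zero here
        // and are filled in from unix.Stat by finishThrottleDevices.
        s.ThrottleReadBpsDevice = map[string]spec.LinuxThrottleDevice{
            "/dev/sda": {Rate: 1048576},
        }
        return generate.CompleteSpec(ctx, rt, s)
    }
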
diff --git a/pkg/specgen/container_create.go b/pkg/specgen/generate/container_create.go
index b4039bb91..aad59a861 100644
--- a/pkg/specgen/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -1,4 +1,4 @@
-package specgen
+package generate
import (
"context"
@@ -7,14 +7,15 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/specgen"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// MakeContainer creates a container based on the SpecGenerator
-func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, error) {
- if err := s.validate(); err != nil {
+func MakeContainer(rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Container, error) {
+ if err := s.Validate(); err != nil {
return nil, errors.Wrap(err, "invalid config provided")
}
rtc, err := rt.GetConfig()
@@ -22,7 +23,7 @@ func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, er
return nil, err
}
- options, err := s.createContainerOptions(rt)
+ options, err := createContainerOptions(rt, s)
if err != nil {
return nil, err
}
@@ -31,7 +32,7 @@ func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, er
if err != nil {
return nil, err
}
- options = append(options, s.createExitCommandOption(rt.StorageConfig(), rtc, podmanPath))
+ options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
newImage, err := rt.ImageRuntime().NewFromLocal(s.Image)
if err != nil {
return nil, err
@@ -39,14 +40,14 @@ func (s *SpecGenerator) MakeContainer(rt *libpod.Runtime) (*libpod.Container, er
options = append(options, libpod.WithRootFSFromImage(newImage.ID(), s.Image, s.RawImageName))
- runtimeSpec, err := s.toOCISpec(rt, newImage)
+ runtimeSpec, err := s.ToOCISpec(rt, newImage)
if err != nil {
return nil, err
}
return rt.NewContainer(context.Background(), runtimeSpec, options...)
}
-func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator) ([]libpod.CtrCreateOption, error) {
var options []libpod.CtrCreateOption
var err error
@@ -114,7 +115,7 @@ func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime) ([]libpod.Ctr
options = append(options, libpod.WithPrivileged(s.Privileged))
// Get namespace related options
- namespaceOptions, err := s.generateNamespaceContainerOpts(rt)
+ namespaceOptions, err := s.GenerateNamespaceContainerOpts(rt)
if err != nil {
return nil, err
}
@@ -149,7 +150,7 @@ func (s *SpecGenerator) createContainerOptions(rt *libpod.Runtime) ([]libpod.Ctr
return options, nil
}
-func (s *SpecGenerator) createExitCommandOption(storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
+func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
// We need a cleanup process for containers in the current model.
// But we can't assume that the caller is Podman - it could be another
// user of the API.
diff --git a/pkg/specgen/namespaces.go b/pkg/specgen/namespaces.go
index fa2dee77d..2a7bb3495 100644
--- a/pkg/specgen/namespaces.go
+++ b/pkg/specgen/namespaces.go
@@ -16,6 +16,9 @@ import (
type NamespaceMode string
const (
+ // Default indicates the spec generator should determine
+ // a sane default
+ Default NamespaceMode = "default"
	// Host means the namespace is derived from
// the host
Host NamespaceMode = "host"
@@ -83,7 +86,7 @@ func validateNetNS(n *Namespace) error {
return nil
}
-// validate perform simple validation on the namespace to make sure it is not
+// Validate performs simple validation on the namespace to make sure it is not
// invalid from the get-go
func (n *Namespace) validate() error {
if n == nil {
@@ -103,7 +106,7 @@ func (n *Namespace) validate() error {
return nil
}
-func (s *SpecGenerator) generateNamespaceContainerOpts(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+func (s *SpecGenerator) GenerateNamespaceContainerOpts(rt *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
var portBindings []ocicni.PortMapping
options := make([]libpod.CtrCreateOption, 0)
diff --git a/pkg/specgen/oci.go b/pkg/specgen/oci.go
index 2523f21b3..0756782b4 100644
--- a/pkg/specgen/oci.go
+++ b/pkg/specgen/oci.go
@@ -11,7 +11,7 @@ import (
"github.com/opencontainers/runtime-tools/generate"
)
-func (s *SpecGenerator) toOCISpec(rt *libpod.Runtime, newImage *image.Image) (*spec.Spec, error) {
+func (s *SpecGenerator) ToOCISpec(rt *libpod.Runtime, newImage *image.Image) (*spec.Spec, error) {
var (
inUserNS bool
)
@@ -215,11 +215,9 @@ func (s *SpecGenerator) toOCISpec(rt *libpod.Runtime, newImage *image.Image) (*s
// BIND MOUNTS
configSpec.Mounts = createconfig.SupercedeUserMounts(s.Mounts, configSpec.Mounts)
// Process mounts to ensure correct options
- finalMounts, err := createconfig.InitFSMounts(configSpec.Mounts)
- if err != nil {
+ if err := createconfig.InitFSMounts(configSpec.Mounts); err != nil {
return nil, err
}
- configSpec.Mounts = finalMounts
// Add annotations
if configSpec.Annotations == nil {
diff --git a/pkg/specgen/security.go b/pkg/specgen/security.go
new file mode 100644
index 000000000..158e4a7b3
--- /dev/null
+++ b/pkg/specgen/security.go
@@ -0,0 +1,165 @@
+package specgen
+
+// ToCreateOptions converts the SecurityConfig to a slice of container create
+// options.
+/*
+func (c *SecurityConfig) ToCreateOptions() ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ options = append(options, libpod.WithSecLabels(c.LabelOpts))
+ options = append(options, libpod.WithPrivileged(c.Privileged))
+ return options, nil
+}
+*/
+
+// SetLabelOpts sets the label options of the SecurityConfig according to the
+// input.
+/*
+func (c *SecurityConfig) SetLabelOpts(runtime *libpod.Runtime, pidConfig *PidConfig, ipcConfig *IpcConfig) error {
+ if c.Privileged {
+ c.LabelOpts = label.DisableSecOpt()
+ return nil
+ }
+
+ var labelOpts []string
+ if pidConfig.PidMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if pidConfig.PidMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(pidConfig.PidMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", pidConfig.PidMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ if ipcConfig.IpcMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if ipcConfig.IpcMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(ipcConfig.IpcMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", ipcConfig.IpcMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ c.LabelOpts = append(c.LabelOpts, labelOpts...)
+ return nil
+}
+*/
+
+// SetSecurityOpts sets the security options (labels, apparmor, seccomp, etc.).
+func SetSecurityOpts(securityOpts []string) error {
+ return nil
+}
+
+// ConfigureGenerator configures the generator according to the input.
+/*
+func (c *SecurityConfig) ConfigureGenerator(g *generate.Generator, user *UserConfig) error {
+ // HANDLE CAPABILITIES
+ // NOTE: Must happen before SECCOMP
+ if c.Privileged {
+ g.SetupPrivileged(true)
+ }
+
+ useNotRoot := func(user string) bool {
+ if user == "" || user == "root" || user == "0" {
+ return false
+ }
+ return true
+ }
+
+ configSpec := g.Config
+ var err error
+ var defaultCaplist []string
+ bounding := configSpec.Process.Capabilities.Bounding
+ if useNotRoot(user.User) {
+ configSpec.Process.Capabilities.Bounding = defaultCaplist
+ }
+ defaultCaplist, err = capabilities.MergeCapabilities(configSpec.Process.Capabilities.Bounding, c.CapAdd, c.CapDrop)
+ if err != nil {
+ return err
+ }
+
+ privCapRequired := []string{}
+
+ if !c.Privileged && len(c.CapRequired) > 0 {
+ // Pass CapRequired in CapAdd field to normalize capabilities names
+ capRequired, err := capabilities.MergeCapabilities(nil, c.CapRequired, nil)
+ if err != nil {
+ logrus.Errorf("capabilities requested by user or image are not valid: %q", strings.Join(c.CapRequired, ","))
+ } else {
+			// Verify all capRequired are in the defaultCaplist
+ for _, cap := range capRequired {
+ if !util.StringInSlice(cap, defaultCaplist) {
+ privCapRequired = append(privCapRequired, cap)
+ }
+ }
+ }
+ if len(privCapRequired) == 0 {
+ defaultCaplist = capRequired
+ } else {
+ logrus.Errorf("capabilities requested by user or image are not allowed by default: %q", strings.Join(privCapRequired, ","))
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = defaultCaplist
+ configSpec.Process.Capabilities.Permitted = defaultCaplist
+ configSpec.Process.Capabilities.Inheritable = defaultCaplist
+ configSpec.Process.Capabilities.Effective = defaultCaplist
+ configSpec.Process.Capabilities.Ambient = defaultCaplist
+ if useNotRoot(user.User) {
+ defaultCaplist, err = capabilities.MergeCapabilities(bounding, c.CapAdd, c.CapDrop)
+ if err != nil {
+ return err
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = defaultCaplist
+
+ // HANDLE SECCOMP
+ if c.SeccompProfilePath != "unconfined" {
+ seccompConfig, err := getSeccompConfig(c, configSpec)
+ if err != nil {
+ return err
+ }
+ configSpec.Linux.Seccomp = seccompConfig
+ }
+
+ // Clear default Seccomp profile from Generator for privileged containers
+ if c.SeccompProfilePath == "unconfined" || c.Privileged {
+ configSpec.Linux.Seccomp = nil
+ }
+
+ for _, opt := range c.SecurityOpts {
+ // Split on both : and =
+ splitOpt := strings.Split(opt, "=")
+ if len(splitOpt) == 1 {
+ splitOpt = strings.Split(opt, ":")
+ }
+ if len(splitOpt) < 2 {
+ continue
+ }
+ switch splitOpt[0] {
+ case "label":
+ configSpec.Annotations[libpod.InspectAnnotationLabel] = splitOpt[1]
+ case "seccomp":
+ configSpec.Annotations[libpod.InspectAnnotationSeccomp] = splitOpt[1]
+ case "apparmor":
+ configSpec.Annotations[libpod.InspectAnnotationApparmor] = splitOpt[1]
+ }
+ }
+
+ g.SetRootReadonly(c.ReadOnlyRootfs)
+ for sysctlKey, sysctlVal := range c.Sysctl {
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
+ return nil
+}
+
+*/
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index 89c76c273..2e6dd9c1d 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -4,8 +4,9 @@ import (
"net"
"syscall"
- "github.com/containers/image/v5/manifest"
"github.com/containers/libpod/libpod"
+
+ "github.com/containers/image/v5/manifest"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -371,6 +372,16 @@ type ContainerResourceConfig struct {
// processes to kill for the container's process.
// Optional.
OOMScoreAdj *int `json:"oom_score_adj,omitempty"`
+ // Weight per cgroup per device, can override BlkioWeight
+ WeightDevice map[string]spec.LinuxWeightDevice `json:"weightDevice,omitempty"`
+ // IO read rate limit per cgroup per device, bytes per second
+ ThrottleReadBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"`
+ // IO write rate limit per cgroup per device, bytes per second
+ ThrottleWriteBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"`
+ // IO read rate limit per cgroup per device, IO per second
+ ThrottleReadIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"`
+ // IO write rate limit per cgroup per device, IO per second
+ ThrottleWriteIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"`
}
// ContainerHealthCheckConfig describes a container healthcheck with attributes
diff --git a/pkg/specgen/storage.go b/pkg/specgen/storage.go
new file mode 100644
index 000000000..1b903f608
--- /dev/null
+++ b/pkg/specgen/storage.go
@@ -0,0 +1,885 @@
+package specgen
+
+//nolint
+
+import (
+ "fmt"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+
+ "github.com/containers/buildah/pkg/parse"
+ "github.com/containers/libpod/pkg/util"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+ // TypeVolume is the type for named volumes
+ TypeVolume = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs = "tmpfs"
+)
+
+var (
+ errDuplicateDest = errors.Errorf("duplicate mount destination") //nolint
+ optionArgError = errors.Errorf("must provide an argument for option") //nolint
+ noDestError = errors.Errorf("must set volume destination") //nolint
+)
+
+// Parse all volume-related options in the create config into a set of mounts
+// and named volumes to add to the container.
+// Handles --volumes-from, --volumes, --tmpfs, --init, and --init-path flags.
+// TODO: Named volume options - should we default to rprivate? It bakes into a
+// bind mount under the hood...
+// TODO: handle options parsing/processing via containers/storage/pkg/mount
+func (s *SpecGenerator) parseVolumes(mounts, volMounts, tmpMounts []string) error { //nolint
+
+	// TODO this needs to come from the image and requires a runtime
+
+ // Add image volumes.
+ //baseMounts, baseVolumes, err := config.getImageVolumes()
+ //if err != nil {
+ // return nil, nil, err
+ //}
+
+ // Add --volumes-from.
+ // Overrides image volumes unconditionally.
+ //vFromMounts, vFromVolumes, err := config.getVolumesFrom(runtime)
+ //if err != nil {
+ // return nil, nil, err
+ //}
+ //for dest, mount := range vFromMounts {
+ // baseMounts[dest] = mount
+ //}
+ //for dest, volume := range vFromVolumes {
+ // baseVolumes[dest] = volume
+ //}
+
+ // Next mounts from the --mounts flag.
+ // Do not override yet.
+ //unifiedMounts, _, err := getMounts(mounts)
+ //if err != nil {
+ // return err
+ //}
+ //
+ //// Next --volumes flag.
+ //// Do not override yet.
+ //volumeMounts, _ , err := getVolumeMounts(volMounts)
+ //if err != nil {
+ // return err
+ //}
+ //
+ //// Next --tmpfs flag.
+ //// Do not override yet.
+ //tmpfsMounts, err := getTmpfsMounts(tmpMounts)
+ //if err != nil {
+ // return err
+ //}
+
+ //// Unify mounts from --mount, --volume, --tmpfs.
+ //// Also add mounts + volumes directly from createconfig.
+ //// Start with --volume.
+ //for dest, mount := range volumeMounts {
+ // if _, ok := unifiedMounts[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ // }
+ // unifiedMounts[dest] = mount
+ //}
+ //for dest, volume := range volumeVolumes {
+ // if _, ok := unifiedVolumes[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ // }
+ // unifiedVolumes[dest] = volume
+ //}
+ //// Now --tmpfs
+ //for dest, tmpfs := range tmpfsMounts {
+ // if _, ok := unifiedMounts[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ // }
+ // unifiedMounts[dest] = tmpfs
+ //}
+ //// Now spec mounts and volumes
+ //for _, mount := range config.Mounts {
+ // dest := mount.Destination
+ // if _, ok := unifiedMounts[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ // }
+ // unifiedMounts[dest] = mount
+ //}
+ //for _, volume := range config.NamedVolumes {
+ // dest := volume.Dest
+ // if _, ok := unifiedVolumes[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, dest)
+ // }
+ // unifiedVolumes[dest] = volume
+ //}
+ //
+ //// If requested, add container init binary
+ //if config.Init {
+ // initPath := config.InitPath
+ // if initPath == "" {
+ // rtc, err := runtime.GetConfig()
+ // if err != nil {
+ // return nil, nil, err
+ // }
+ // initPath = rtc.Engine.InitPath
+ // }
+ // initMount, err := config.addContainerInitBinary(initPath)
+ // if err != nil {
+ // return nil, nil, err
+ // }
+ // if _, ok := unifiedMounts[initMount.Destination]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, "conflict with mount added by --init to %q", initMount.Destination)
+ // }
+ // unifiedMounts[initMount.Destination] = initMount
+ //}
+ //
+ //// Before superseding, we need to find volume mounts which conflict with
+ //// named volumes, and vice versa.
+ //// We'll delete the conflicts here as we supersede.
+ //for dest := range unifiedMounts {
+ // if _, ok := baseVolumes[dest]; ok {
+ // delete(baseVolumes, dest)
+ // }
+ //}
+ //for dest := range unifiedVolumes {
+ // if _, ok := baseMounts[dest]; ok {
+ // delete(baseMounts, dest)
+ // }
+ //}
+ //
+ //// Supersede volumes-from/image volumes with unified volumes from above.
+ //// This is an unconditional replacement.
+ //for dest, mount := range unifiedMounts {
+ // baseMounts[dest] = mount
+ //}
+ //for dest, volume := range unifiedVolumes {
+ // baseVolumes[dest] = volume
+ //}
+ //
+ //// If requested, add tmpfs filesystems for read-only containers.
+ //if config.Security.ReadOnlyRootfs && config.Security.ReadOnlyTmpfs {
+ // readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
+ // options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
+ // for _, dest := range readonlyTmpfs {
+ // if _, ok := baseMounts[dest]; ok {
+ // continue
+ // }
+ // if _, ok := baseVolumes[dest]; ok {
+ // continue
+ // }
+ // localOpts := options
+ // if dest == "/run" {
+ // localOpts = append(localOpts, "noexec", "size=65536k")
+ // } else {
+ // localOpts = append(localOpts, "exec")
+ // }
+ // baseMounts[dest] = spec.Mount{
+ // Destination: dest,
+ // Type: "tmpfs",
+ // Source: "tmpfs",
+ // Options: localOpts,
+ // }
+ // }
+ //}
+ //
+ //// Check for conflicts between named volumes and mounts
+ //for dest := range baseMounts {
+ // if _, ok := baseVolumes[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ // }
+ //}
+ //for dest := range baseVolumes {
+ // if _, ok := baseMounts[dest]; ok {
+ // return nil, nil, errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest)
+ // }
+ //}
+ //
+ //// Final step: maps to arrays
+ //finalMounts := make([]spec.Mount, 0, len(baseMounts))
+ //for _, mount := range baseMounts {
+ // if mount.Type == TypeBind {
+ // absSrc, err := filepath.Abs(mount.Source)
+ // if err != nil {
+ // return nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source)
+ // }
+ // mount.Source = absSrc
+ // }
+ // finalMounts = append(finalMounts, mount)
+ //}
+ //finalVolumes := make([]*define.ContainerNamedVolume, 0, len(baseVolumes))
+ //for _, volume := range baseVolumes {
+ // finalVolumes = append(finalVolumes, volume)
+ //}
+
+ //return finalMounts, finalVolumes, nil
+ return nil
+}
+
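+// Intended precedence for parseVolumes once the deferred logic above is
+// restored: image volumes are overridden unconditionally by --volumes-from,
+// which in turn is superseded by mounts and volumes from --mount, --volume,
+// and --tmpfs; duplicate destinations within that last group are rejected as
+// errors.
+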
+// getVolumesFrom parses the --volumes-from flag: a set of containers whose volumes we will mount in.
+// Grab the containers, retrieve any user-created spec mounts and all named
+// volumes, and return a list of them.
+// Conflicts are resolved simply - the last container specified wins.
+// Container names may be suffixed by mount options after a colon.
+// TODO: We should clean these paths if possible
+// TODO deferred baude
+func getVolumesFrom() (map[string]spec.Mount, map[string]*libpod.ContainerNamedVolume, error) { //nolint
+ // Both of these are maps of mount destination to mount type.
+ // We ensure that each destination is only mounted to once in this way.
+ //finalMounts := make(map[string]spec.Mount)
+ //finalNamedVolumes := make(map[string]*define.ContainerNamedVolume)
+ //
+ //for _, vol := range config.VolumesFrom {
+ // var (
+ // options = []string{}
+ // err error
+ // splitVol = strings.SplitN(vol, ":", 2)
+ // )
+ // if len(splitVol) == 2 {
+ // splitOpts := strings.Split(splitVol[1], ",")
+ // for _, checkOpt := range splitOpts {
+ // switch checkOpt {
+ // case "z", "ro", "rw":
+ // // Do nothing, these are valid options
+ // default:
+ // return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z'", splitVol[1])
+ // }
+ // }
+ //
+ // if options, err = parse.ValidateVolumeOpts(splitOpts); err != nil {
+ // return nil, nil, err
+ // }
+ // }
+ // ctr, err := runtime.LookupContainer(splitVol[0])
+ // if err != nil {
+ // return nil, nil, errors.Wrapf(err, "error looking up container %q for volumes-from", splitVol[0])
+ // }
+ //
+ // logrus.Debugf("Adding volumes from container %s", ctr.ID())
+ //
+ // // Look up the container's user volumes. This gets us the
+ // // destinations of all mounts the user added to the container.
+ // userVolumesArr := ctr.UserVolumes()
+ //
+ // // We're going to need to access them a lot, so convert to a map
+ // // to reduce looping.
+ // // We'll also use the map to indicate if we missed any volumes along the way.
+ // userVolumes := make(map[string]bool)
+ // for _, dest := range userVolumesArr {
+ // userVolumes[dest] = false
+ // }
+ //
+ // // Now we get the container's spec and loop through its volumes
+ // // and append them in if we can find them.
+ // spec := ctr.Spec()
+ // if spec == nil {
+ // return nil, nil, errors.Errorf("error retrieving container %s spec for volumes-from", ctr.ID())
+ // }
+ // for _, mnt := range spec.Mounts {
+ // if mnt.Type != TypeBind {
+ // continue
+ // }
+ // if _, exists := userVolumes[mnt.Destination]; exists {
+ // userVolumes[mnt.Destination] = true
+ //
+ // if len(options) != 0 {
+ // mnt.Options = options
+ // }
+ //
+ // if _, ok := finalMounts[mnt.Destination]; ok {
+ // logrus.Debugf("Overriding mount to %s with new mount from container %s", mnt.Destination, ctr.ID())
+ // }
+ // finalMounts[mnt.Destination] = mnt
+ // }
+ // }
+ //
+ // // We're done with the spec mounts. Add named volumes.
+ // // Add these unconditionally - none of them are automatically
+ // // part of the container, as some spec mounts are.
+ // namedVolumes := ctr.NamedVolumes()
+ // for _, namedVol := range namedVolumes {
+ // if _, exists := userVolumes[namedVol.Dest]; exists {
+ // userVolumes[namedVol.Dest] = true
+ // }
+ //
+ // if len(options) != 0 {
+ // namedVol.Options = options
+ // }
+ //
+ // if _, ok := finalMounts[namedVol.Dest]; ok {
+ // logrus.Debugf("Overriding named volume mount to %s with new named volume from container %s", namedVol.Dest, ctr.ID())
+ // }
+ // finalNamedVolumes[namedVol.Dest] = namedVol
+ // }
+ //
+ // // Check if we missed any volumes
+ // for volDest, found := range userVolumes {
+ // if !found {
+ // logrus.Warnf("Unable to match volume %s from container %s for volumes-from", volDest, ctr.ID())
+ // }
+ // }
+ //}
+ //
+ //return finalMounts, finalNamedVolumes, nil
+ return nil, nil, nil
+}
+
+// getMounts takes user-provided input from the --mount flag and creates OCI
+// spec mounts and Libpod named volumes.
+// podman run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// podman run --mount type=tmpfs,target=/dev/shm ...
+// podman run --mount type=volume,source=test-volume, ...
+func getMounts(mounts []string) (map[string]spec.Mount, map[string]*libpod.ContainerNamedVolume, error) { //nolint
+ finalMounts := make(map[string]spec.Mount)
+ finalNamedVolumes := make(map[string]*libpod.ContainerNamedVolume)
+
+ errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs|volume>,[src=<host-dir|volume-name>,]target=<ctr-dir>[,options]")
+
+ // TODO(vrothberg): the manual parsing can be replaced with a regular expression
+ // to allow a more robust parsing of the mount format and to give
+ // precise errors regarding supported format versus supported options.
+ for _, mount := range mounts {
+ arr := strings.SplitN(mount, ",", 2)
+ if len(arr) < 2 {
+ return nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
+ }
+ kv := strings.Split(arr[0], "=")
+ // TODO: type is not explicitly required in Docker.
+ // If not specified, it defaults to "volume".
+ if len(kv) != 2 || kv[0] != "type" {
+ return nil, nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
+ }
+
+ tokens := strings.Split(arr[1], ",")
+ switch kv[1] {
+ case TypeBind:
+ mount, err := getBindMount(tokens)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ }
+ finalMounts[mount.Destination] = mount
+ case TypeTmpfs:
+ mount, err := getTmpfsMount(tokens)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ }
+ finalMounts[mount.Destination] = mount
+ case "volume":
+ volume, err := getNamedVolume(tokens)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, ok := finalNamedVolumes[volume.Dest]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, volume.Dest)
+ }
+ finalNamedVolumes[volume.Dest] = volume
+ default:
+ return nil, nil, errors.Errorf("invalid filesystem type %q", kv[1])
+ }
+ }
+
+ return finalMounts, finalNamedVolumes, nil
+}
+
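+// Illustrative sketch of how getMounts is fed (the values below are examples,
+// not defaults):
+//
+//   mounts, vols, err := getMounts([]string{
+//       "type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf,ro",
+//       "type=volume,source=test-volume,destination=/data",
+//   })
+//   // mounts["/etc/resolv.conf"] holds a bind spec.Mount; vols["/data"] holds
+//   // a *libpod.ContainerNamedVolume named "test-volume".
+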
+// Parse a single bind mount entry from the --mount flag.
+func getBindMount(args []string) (spec.Mount, error) { //nolint
+ newMount := spec.Mount{
+ Type: TypeBind,
+ }
+
+ var setSource, setDest, setRORW, setSuid, setDev, setExec, setRelabel bool
+
+ for _, val := range args {
+ kv := strings.Split(val, "=")
+ switch kv[0] {
+ case "bind-nonrecursive":
+ newMount.Options = append(newMount.Options, "bind")
+ case "ro", "rw":
+ if setRORW {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' or 'rw' options more than once")
+ }
+ setRORW = true
+ // Can be formatted as one of:
+ // ro
+ // ro=[true|false]
+ // rw
+ // rw=[true|false]
+ switch len(kv) {
+ case 1:
+ newMount.Options = append(newMount.Options, kv[0])
+ case 2:
+ switch strings.ToLower(kv[1]) {
+ case "true":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "false":
+ // Set the opposite only for rw
+ // ro's opposite is the default
+ if kv[0] == "rw" {
+ newMount.Options = append(newMount.Options, "ro")
+ }
+ default:
+ return newMount, errors.Wrapf(optionArgError, "%s must be set to true or false, instead received %q", kv[0], kv[1])
+ }
+ default:
+ return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val)
+ }
+ case "nosuid", "suid":
+ if setSuid {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ }
+ setSuid = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "nodev", "dev":
+ if setDev {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ }
+ setDev = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "noexec", "exec":
+ if setExec {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ }
+ setExec = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "bind-propagation":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ newMount.Options = append(newMount.Options, kv[1])
+ case "src", "source":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ if err := parse.ValidateVolumeHostDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Source = kv[1]
+ setSource = true
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Destination = filepath.Clean(kv[1])
+ setDest = true
+ case "relabel":
+ if setRelabel {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'relabel' option more than once")
+ }
+ setRelabel = true
+ if len(kv) != 2 {
+ return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0])
+ }
+ switch kv[1] {
+ case "private":
+ newMount.Options = append(newMount.Options, "z")
+ case "shared":
+ newMount.Options = append(newMount.Options, "Z")
+ default:
+ return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0])
+ }
+ default:
+ return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0])
+ }
+ }
+
+ if !setDest {
+ return newMount, noDestError
+ }
+
+ if !setSource {
+ newMount.Source = newMount.Destination
+ }
+
+ options, err := parse.ValidateVolumeOpts(newMount.Options)
+ if err != nil {
+ return newMount, err
+ }
+ newMount.Options = options
+ return newMount, nil
+}
+
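+// For illustration (a sketch, not an exhaustive list of options): passing
+//   []string{"src=/var/log", "target=/logs", "ro=true", "bind-propagation=rslave"}
+// to getBindMount yields a spec.Mount with Type "bind", Source "/var/log",
+// Destination "/logs", and Options containing "ro" and "rslave", plus whatever
+// defaults parse.ValidateVolumeOpts appends.
+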
+// Parse a single tmpfs mount entry from the --mount flag
+func getTmpfsMount(args []string) (spec.Mount, error) { //nolint
+ newMount := spec.Mount{
+ Type: TypeTmpfs,
+ Source: TypeTmpfs,
+ }
+
+ var setDest, setRORW, setSuid, setDev, setExec, setTmpcopyup bool
+
+ for _, val := range args {
+ kv := strings.Split(val, "=")
+ switch kv[0] {
+ case "tmpcopyup", "notmpcopyup":
+ if setTmpcopyup {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once")
+ }
+ setTmpcopyup = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "ro", "rw":
+ if setRORW {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
+ }
+ setRORW = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "nosuid", "suid":
+ if setSuid {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ }
+ setSuid = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "nodev", "dev":
+ if setDev {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ }
+ setDev = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "noexec", "exec":
+ if setExec {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ }
+ setExec = true
+ newMount.Options = append(newMount.Options, kv[0])
+ case "tmpfs-mode":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+ case "tmpfs-size":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+ case "src", "source":
+ return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Destination = filepath.Clean(kv[1])
+ setDest = true
+ default:
+ return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0])
+ }
+ }
+
+ if !setDest {
+ return newMount, noDestError
+ }
+
+ return newMount, nil
+}
+
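+// For illustration, getTmpfsMount on the tokens
+//   []string{"target=/dev/shm", "tmpfs-mode=1777", "tmpfs-size=512m"}
+// produces a tmpfs spec.Mount at /dev/shm with Options ["mode=1777", "size=512m"].
+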
+// Parse a single volume mount entry from the --mount flag.
+// Note that the volume-label option for named volumes is currently NOT supported.
+// TODO: add support for --volume-label
+func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) { //nolint
+ newVolume := new(libpod.ContainerNamedVolume)
+
+ var setSource, setDest, setRORW, setSuid, setDev, setExec bool
+
+ for _, val := range args {
+ kv := strings.Split(val, "=")
+ switch kv[0] {
+ case "ro", "rw":
+ if setRORW {
+ return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
+ }
+ setRORW = true
+ newVolume.Options = append(newVolume.Options, kv[0])
+ case "nosuid", "suid":
+ if setSuid {
+ return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once")
+ }
+ setSuid = true
+ newVolume.Options = append(newVolume.Options, kv[0])
+ case "nodev", "dev":
+ if setDev {
+ return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once")
+ }
+ setDev = true
+ newVolume.Options = append(newVolume.Options, kv[0])
+ case "noexec", "exec":
+ if setExec {
+ return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once")
+ }
+ setExec = true
+ newVolume.Options = append(newVolume.Options, kv[0])
+ case "volume-label":
+ return nil, errors.Errorf("the --volume-label option is not presently implemented")
+ case "src", "source":
+ if len(kv) == 1 {
+ return nil, errors.Wrapf(optionArgError, kv[0])
+ }
+ newVolume.Name = kv[1]
+ setSource = true
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return nil, errors.Wrapf(optionArgError, kv[0])
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return nil, err
+ }
+ newVolume.Dest = filepath.Clean(kv[1])
+ setDest = true
+ default:
+ return nil, errors.Wrapf(util.ErrBadMntOption, kv[0])
+ }
+ }
+
+ if !setSource {
+ return nil, errors.Errorf("must set source volume")
+ }
+ if !setDest {
+ return nil, noDestError
+ }
+
+ return newVolume, nil
+}
+
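+// For illustration, getNamedVolume on the tokens
+//   []string{"source=test-volume", "destination=/data", "ro"}
+// produces a ContainerNamedVolume with Name "test-volume", Dest "/data", and
+// Options ["ro"].
+
+// getVolumeMounts parses user input from the --volume flag into OCI spec
+// mounts and libpod named volumes.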
+func getVolumeMounts(vols []string) (map[string]spec.Mount, map[string]*libpod.ContainerNamedVolume, error) { //nolint
+ mounts := make(map[string]spec.Mount)
+ volumes := make(map[string]*libpod.ContainerNamedVolume)
+
+ volumeFormatErr := errors.Errorf("incorrect volume format, should be [host-dir:]ctr-dir[:option]")
+
+ for _, vol := range vols {
+ var (
+ options []string
+ src string
+ dest string
+ err error
+ )
+
+ splitVol := strings.Split(vol, ":")
+ if len(splitVol) > 3 {
+ return nil, nil, errors.Wrapf(volumeFormatErr, vol)
+ }
+
+ src = splitVol[0]
+ if len(splitVol) == 1 {
+ // This is an anonymous named volume. Only thing given
+ // is destination.
+ // Name/source will be blank, and populated by libpod.
+ src = ""
+ dest = splitVol[0]
+ } else if len(splitVol) > 1 {
+ dest = splitVol[1]
+ }
+ if len(splitVol) > 2 {
+ if options, err = parse.ValidateVolumeOpts(strings.Split(splitVol[2], ",")); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Do not check source dir for anonymous volumes
+ if len(splitVol) > 1 {
+ if err := parse.ValidateVolumeHostDir(src); err != nil {
+ return nil, nil, err
+ }
+ }
+ if err := parse.ValidateVolumeCtrDir(dest); err != nil {
+ return nil, nil, err
+ }
+
+ cleanDest := filepath.Clean(dest)
+
+ if strings.HasPrefix(src, "/") || strings.HasPrefix(src, ".") {
+ // This is not a named volume
+ newMount := spec.Mount{
+ Destination: cleanDest,
+ Type: string(TypeBind),
+ Source: src,
+ Options: options,
+ }
+ if _, ok := mounts[newMount.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, newMount.Destination)
+ }
+ mounts[newMount.Destination] = newMount
+ } else {
+ // This is a named volume
+ newNamedVol := new(libpod.ContainerNamedVolume)
+ newNamedVol.Name = src
+ newNamedVol.Dest = cleanDest
+ newNamedVol.Options = options
+
+ if _, ok := volumes[newNamedVol.Dest]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, newNamedVol.Dest)
+ }
+ volumes[newNamedVol.Dest] = newNamedVol
+ }
+
+ logrus.Debugf("User mount %s:%s options %v", src, dest, options)
+ }
+
+ return mounts, volumes, nil
+}
+
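+// Illustrative sketch of the three forms getVolumeMounts accepts from the
+// --volume flag:
+//
+//   "/host/dir:/ctr/dir:ro"  bind mount (host path starts with "/" or ".")
+//   "myvol:/data"            named volume "myvol" mounted at /data
+//   "/anon"                  anonymous named volume at /anon, name filled in by libpod
+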
+// Get mounts for container's image volumes
+// TODO deferred baude
+func getImageVolumes() (map[string]spec.Mount, map[string]*libpod.ContainerNamedVolume, error) { //nolint
+ //mounts := make(map[string]spec.Mount)
+ //volumes := make(map[string]*define.ContainerNamedVolume)
+ //
+ //if config.ImageVolumeType == "ignore" {
+ // return mounts, volumes, nil
+ //}
+ //
+ //for vol := range config.BuiltinImgVolumes {
+ // cleanDest := filepath.Clean(vol)
+ // logrus.Debugf("Adding image volume at %s", cleanDest)
+ // if config.ImageVolumeType == "tmpfs" {
+ // // Tmpfs image volumes are handled as mounts
+ // mount := spec.Mount{
+ // Destination: cleanDest,
+ // Source: TypeTmpfs,
+ // Type: TypeTmpfs,
+ // Options: []string{"rprivate", "rw", "nodev", "exec"},
+ // }
+ // mounts[cleanDest] = mount
+ // } else {
+ // // Anonymous volumes have no name.
+ // namedVolume := new(define.ContainerNamedVolume)
+ // namedVolume.Options = []string{"rprivate", "rw", "nodev", "exec"}
+ // namedVolume.Dest = cleanDest
+ // volumes[cleanDest] = namedVolume
+ // }
+ //}
+ //
+ //return mounts, volumes, nil
+ return nil, nil, nil
+}
+
+// getTmpfsMounts creates spec.Mount structs for user-requested tmpfs mounts (--tmpfs)
+func getTmpfsMounts(mounts []string) (map[string]spec.Mount, error) { //nolint
+ m := make(map[string]spec.Mount)
+ for _, i := range mounts {
+ // Default options if nothing passed
+ var options []string
+ spliti := strings.Split(i, ":")
+ destPath := spliti[0]
+ if err := parse.ValidateVolumeCtrDir(spliti[0]); err != nil {
+ return nil, err
+ }
+ if len(spliti) > 1 {
+ options = strings.Split(spliti[1], ",")
+ }
+
+ if _, ok := m[destPath]; ok {
+ return nil, errors.Wrapf(errDuplicateDest, destPath)
+ }
+
+ mount := spec.Mount{
+ Destination: filepath.Clean(destPath),
+ Type: string(TypeTmpfs),
+ Options: options,
+ Source: string(TypeTmpfs),
+ }
+ m[destPath] = mount
+ }
+ return m, nil
+}
+
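+// For illustration, "--tmpfs /run:rw,size=65536k" reaches getTmpfsMounts as
+// "/run:rw,size=65536k" and becomes a tmpfs spec.Mount at /run with
+// Options ["rw", "size=65536k"].
+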
+// addContainerInitBinary adds the init binary specified by path if and only if
+// the container will run in a private PID namespace that is not shared with the
+// host or another pre-existing container, where an init-like process is
+// already running.
+//
+// Note that addContainerInitBinary prepends "/dev/init" "--" to the command
+// so that the bind-mounted binary runs as PID 1.
+// TODO: this needs to be reworked to function in the new specgen environment
+func addContainerInitBinary(path string) (spec.Mount, error) { //nolint
+ mount := spec.Mount{
+ Destination: "/dev/init",
+ Type: TypeBind,
+ Source: path,
+ Options: []string{TypeBind, "ro"},
+ }
+
+ //if path == "" {
+ // return mount, fmt.Errorf("please specify a path to the container-init binary")
+ //}
+ //if !config.Pid.PidMode.IsPrivate() {
+ // return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
+ //}
+ //if config.Systemd {
+ // return mount, fmt.Errorf("cannot use container-init binary with systemd")
+ //}
+ //if _, err := os.Stat(path); os.IsNotExist(err) {
+ // return mount, errors.Wrap(err, "container-init binary not found on the host")
+ //}
+ //config.Command = append([]string{"/dev/init", "--"}, config.Command...)
+ return mount, nil
+}
+
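+// For illustration (the path below is only an example, not a configured
+// default): addContainerInitBinary("/usr/libexec/podman/catatonit") would
+// bind-mount that binary read-only at /dev/init; per the deferred logic above,
+// the container command would then be prefixed with "/dev/init" "--".
+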
+// Supersede existing mounts in the spec with new, user-specified mounts.
+// TODO: Should we unmount subtree mounts? E.g., if /tmp/ is mounted by
+// one mount, and we already have /tmp/a and /tmp/b, should we remove
+// the /tmp/a and /tmp/b mounts in favor of the more general /tmp?
+func SupercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
+ if len(mounts) > 0 {
+ // If we have overlapping mounts, remove them from the spec in favor of
+ // the user-added volume mounts.
+ destinations := make(map[string]bool)
+ for _, mount := range mounts {
+ destinations[path.Clean(mount.Destination)] = true
+ }
+ // Copy all mounts from spec to defaultMounts, except for
+ // - mounts overridden by a user supplied mount;
+ // - all mounts under /dev if a user supplied /dev is present;
+ mountDev := destinations["/dev"]
+ for _, mount := range configMount {
+ if _, ok := destinations[path.Clean(mount.Destination)]; !ok {
+ if mountDev && strings.HasPrefix(mount.Destination, "/dev/") {
+ // filter out everything under /dev if /dev is user-mounted
+ continue
+ }
+
+ logrus.Debugf("Adding mount %s", mount.Destination)
+ mounts = append(mounts, mount)
+ }
+ }
+ return mounts
+ }
+ return configMount
+}
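+
+// For illustration: if the user supplies a mount at /dev, SupercedeUserMounts
+// drops every default mount under /dev/ (for example /dev/pts or /dev/shm) from
+// configMount and keeps only destinations not shadowed by a user mount.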
+
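+// InitFSMounts fills in default options for user-supplied bind and tmpfs mounts
+// via util.ProcessOptions; bind mount defaults are derived from the filesystem
+// backing the mount source, and tmpfs mounts at /dev are left untouched.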
+func InitFSMounts(mounts []spec.Mount) error {
+ for i, m := range mounts {
+ switch {
+ case m.Type == TypeBind:
+ opts, err := util.ProcessOptions(m.Options, false, m.Source)
+ if err != nil {
+ return err
+ }
+ mounts[i].Options = opts
+ case m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev":
+ opts, err := util.ProcessOptions(m.Options, true, "")
+ if err != nil {
+ return err
+ }
+ mounts[i].Options = opts
+ }
+ }
+ return nil
+}
diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go
index d21800bc3..329a7c913 100644
--- a/pkg/util/mountOpts.go
+++ b/pkg/util/mountOpts.go
@@ -13,19 +13,17 @@ var (
ErrDupeMntOption = errors.Errorf("duplicate mount option passed")
)
-// DefaultMountOptions sets default mount options for ProcessOptions.
-type DefaultMountOptions struct {
- Noexec bool
- Nosuid bool
- Nodev bool
+type defaultMountOptions struct {
+ noexec bool
+ nosuid bool
+ nodev bool
}
// ProcessOptions parses the options for a bind or tmpfs mount and ensures that
// they are sensible and follow convention. The isTmpfs variable controls
// whether extra, tmpfs-specific options will be allowed.
-// The defaults variable controls default mount options that will be set. If it
-// is not included, they will be set unconditionally.
-func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOptions) ([]string, error) {
+// The sourcePath variable, if not empty, contains a bind mount source.
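+// On Linux, an empty sourcePath leaves the noexec, nosuid, and nodev defaults
+// all enabled; otherwise those defaults are derived from the filesystem backing
+// sourcePath via getDefaultMountOptions.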
+func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) {
var (
foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool
)
@@ -122,13 +120,17 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
if !foundProp {
newOptions = append(newOptions, "rprivate")
}
- if !foundExec && (defaults == nil || defaults.Noexec) {
+ defaults, err := getDefaultMountOptions(sourcePath)
+ if err != nil {
+ return nil, err
+ }
+ if !foundExec && defaults.noexec {
newOptions = append(newOptions, "noexec")
}
- if !foundSuid && (defaults == nil || defaults.Nosuid) {
+ if !foundSuid && defaults.nosuid {
newOptions = append(newOptions, "nosuid")
}
- if !foundDev && (defaults == nil || defaults.Nodev) {
+ if !foundDev && defaults.nodev {
newOptions = append(newOptions, "nodev")
}
if isTmpfs && !foundCopyUp {
diff --git a/pkg/util/mountOpts_linux.go b/pkg/util/mountOpts_linux.go
new file mode 100644
index 000000000..3eac4dd25
--- /dev/null
+++ b/pkg/util/mountOpts_linux.go
@@ -0,0 +1,23 @@
+package util
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
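+// getDefaultMountOptions probes the filesystem backing path with statfs(2) and
+// reports whether it is mounted nodev, noexec, and/or nosuid; an empty path
+// returns all three defaults enabled.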
+func getDefaultMountOptions(path string) (defaultMountOptions, error) {
+ opts := defaultMountOptions{true, true, true}
+ if path == "" {
+ return opts, nil
+ }
+ var statfs unix.Statfs_t
+ if e := unix.Statfs(path, &statfs); e != nil {
+ return opts, &os.PathError{Op: "statfs", Path: path, Err: e}
+ }
+ opts.nodev = (statfs.Flags&unix.MS_NODEV == unix.MS_NODEV)
+ opts.noexec = (statfs.Flags&unix.MS_NOEXEC == unix.MS_NOEXEC)
+ opts.nosuid = (statfs.Flags&unix.MS_NOSUID == unix.MS_NOSUID)
+
+ return opts, nil
+}
diff --git a/pkg/util/mountOpts_other.go b/pkg/util/mountOpts_other.go
new file mode 100644
index 000000000..6a34942e5
--- /dev/null
+++ b/pkg/util/mountOpts_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package util
+
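+// getDefaultMountOptions is a stub on non-Linux platforms: the returned
+// zero-value defaults leave noexec, nosuid, and nodev unset.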
+func getDefaultMountOptions(path string) (opts defaultMountOptions, err error) {
+ return
+}
diff --git a/pkg/varlinkapi/attach.go b/pkg/varlinkapi/attach.go
index 94f4d653e..34f351669 100644
--- a/pkg/varlinkapi/attach.go
+++ b/pkg/varlinkapi/attach.go
@@ -16,7 +16,7 @@ import (
"k8s.io/client-go/tools/remotecommand"
)
-func setupStreams(call iopodman.VarlinkCall) (*bufio.Reader, *bufio.Writer, *io.PipeReader, *io.PipeWriter, *libpod.AttachStreams) {
+func setupStreams(call iopodman.VarlinkCall) (*bufio.Reader, *bufio.Writer, *io.PipeReader, *io.PipeWriter, *define.AttachStreams) {
// These are the varlink sockets
reader := call.Call.Reader
@@ -30,7 +30,7 @@ func setupStreams(call iopodman.VarlinkCall) (*bufio.Reader, *bufio.Writer, *io.
// TODO if runc ever starts passing stderr, we can too
// stderrWriter := NewVirtWriteCloser(writer, ToStderr)
- streams := libpod.AttachStreams{
+ streams := define.AttachStreams{
OutputStream: stdoutWriter,
InputStream: bufio.NewReader(pr),
// Runc eats the error stream
@@ -117,7 +117,7 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
return call.Writer.Flush()
}
-func attach(ctr *libpod.Container, streams *libpod.AttachStreams, detachKeys string, resize chan remotecommand.TerminalSize, errChan chan error) error {
+func attach(ctr *libpod.Container, streams *define.AttachStreams, detachKeys string, resize chan remotecommand.TerminalSize, errChan chan error) error {
go func() {
if err := ctr.Attach(streams, detachKeys, resize); err != nil {
errChan <- err
@@ -127,7 +127,7 @@ func attach(ctr *libpod.Container, streams *libpod.AttachStreams, detachKeys str
return attachError
}
-func startAndAttach(ctr *libpod.Container, streams *libpod.AttachStreams, detachKeys string, resize chan remotecommand.TerminalSize, errChan chan error) error {
+func startAndAttach(ctr *libpod.Container, streams *define.AttachStreams, detachKeys string, resize chan remotecommand.TerminalSize, errChan chan error) error {
var finalErr error
attachChan, err := ctr.StartAndAttach(getContext(), streams, detachKeys, resize, false)
if err != nil {