47 files changed, 1093 insertions(+), 459 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml index e53788c6c..14c64b412 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -36,10 +36,10 @@ env: ### FEDORA_NAME: "fedora-32" PRIOR_FEDORA_NAME: "fedora-31" - UBUNTU_NAME: "ubuntu-19" - PRIOR_UBUNTU_NAME: "ubuntu-18" + UBUNTU_NAME: "ubuntu-20" + PRIOR_UBUNTU_NAME: "ubuntu-19" - _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script' + _BUILT_IMAGE_SUFFIX: "libpod-6268069335007232" # From the packer output of 'build_vm_images_script' FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" diff --git a/Dockerfile.ubuntu b/Dockerfile.ubuntu index 3a8f837b9..160c1469c 100644 --- a/Dockerfile.ubuntu +++ b/Dockerfile.ubuntu @@ -1,5 +1,5 @@ # Must resemble $UBUNTU_BASE_IMAGE in ./contrib/cirrus/lib.sh -FROM ubuntu:latest +FROM ubuntu:20.04 # This container image is intended for building and testing libpod # from inside a container environment. It is assumed that the source @@ -22,6 +22,7 @@ ETCDIR ?= /etc TMPFILESDIR ?= ${PREFIX}/lib/tmpfiles.d SYSTEMDDIR ?= ${PREFIX}/lib/systemd/system USERSYSTEMDDIR ?= ${PREFIX}/lib/systemd/user +REMOTETAGS := !ABISupport remoteclient exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp BUILDTAGS ?= \ $(shell hack/apparmor_tag.sh) \ $(shell hack/btrfs_installed_tag.sh) \ @@ -189,11 +190,11 @@ podman: bin/podman .PHONY: bin/podman-remote bin/podman-remote: .gopathok $(SOURCES) go.mod go.sum $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote environment - $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "!ABISupport remoteclient" -o $@ $(PROJECT)/cmd/podman + $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o $@ $(PROJECT)/cmd/podman .PHONY: bin/podman-remote-static podman-remote-static: bin/podman-remote-static - CGO_ENABLED=0 $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN_STATIC)' -tags "!ABISupport containers_image_openpgp remoteclient" -o bin/podman-remote-static $(PROJECT)/cmd/podman + CGO_ENABLED=0 $(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN_STATIC)' -tags "${REMOTETAGS}" -o bin/podman-remote-static $(PROJECT)/cmd/podman .PHONY: podman-remote podman-remote: bin/podman-remote @@ -207,7 +208,7 @@ podman.msi: podman-remote podman-remote-windows install-podman-remote-windows-do podman-remote-%: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build podman for a specific GOOS $(eval BINSFX := $(shell test "$*" != "windows" || echo ".exe")) - CGO_ENABLED=0 GOOS=$* $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@$(BINSFX) $(PROJECT)/cmd/podman + CGO_ENABLED=0 GOOS=$* $(GO_BUILD) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o bin/$@$(BINSFX) $(PROJECT)/cmd/podman local-cross: $(CROSS_BUILD_TARGETS) ## Cross local compilation @@ -282,11 +283,11 @@ dbuild: libpodimage .PHONY: dbuild-podman-remote dbuild-podman-remote: libpodimage - ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} go build -ldflags 
'$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/podman-remote $(PROJECT)/cmd/podman + ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} go build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(REMOTETAGS)" -o bin/podman-remote $(PROJECT)/cmd/podman .PHONY: dbuild-podman-remote-darwin dbuild-podman-remote-darwin: libpodimage - ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} env GOOS=darwin go build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/podman-remote-darwin $(PROJECT)/cmd/podman + ${CONTAINER_RUNTIME} run --name=${LIBPOD_INSTANCE} --privileged -v ${PWD}:/go/src/${PROJECT} --rm ${LIBPOD_IMAGE} env GOOS=darwin go build -ldflags '$(LDFLAGS_PODMAN)' -tags "${REMOTETAGS}" -o bin/podman-remote-darwin $(PROJECT)/cmd/podman .PHONY: test test: libpodimage ## Run tests on built image @@ -329,7 +330,7 @@ ginkgo: .PHONY: ginkgo-remote ginkgo-remote: - ginkgo -v $(TESTFLAGS) -tags "$(BUILDTAGS) remoteclient" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/. + ginkgo -v $(TESTFLAGS) -tags "$(REMOTETAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor test/e2e/. .PHONY: endpoint endpoint: diff --git a/cmd/podman/containers/attach.go b/cmd/podman/containers/attach.go index 119b47d3f..9f29d1664 100644 --- a/cmd/podman/containers/attach.go +++ b/cmd/podman/containers/attach.go @@ -52,14 +52,14 @@ func attachFlags(flags *pflag.FlagSet) { func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, Command: attachCommand, }) flags := attachCommand.Flags() attachFlags(flags) registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, Command: containerAttachCommand, Parent: containerCmd, }) diff --git a/cmd/podman/containers/checkpoint.go b/cmd/podman/containers/checkpoint.go index 7259ed38b..c4723af21 100644 --- a/cmd/podman/containers/checkpoint.go +++ b/cmd/podman/containers/checkpoint.go @@ -45,7 +45,7 @@ func init() { }) flags := checkpointCommand.Flags() flags.BoolVarP(&checkpointOptions.Keep, "keep", "k", false, "Keep all temporary checkpoint files") - flags.BoolVarP(&checkpointOptions.LeaveRuninng, "leave-running", "R", false, "Leave the container running after writing checkpoint to disk") + flags.BoolVarP(&checkpointOptions.LeaveRunning, "leave-running", "R", false, "Leave the container running after writing checkpoint to disk") flags.BoolVar(&checkpointOptions.TCPEstablished, "tcp-established", false, "Checkpoint a container with established TCP connections") flags.BoolVarP(&checkpointOptions.All, "all", "a", false, "Checkpoint all running containers") flags.BoolVarP(&checkpointOptions.Latest, "latest", "l", false, "Act on the latest container podman is aware of") diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go index 5f3ea9ef4..2298691a9 100644 --- a/cmd/podman/containers/run.go +++ b/cmd/podman/containers/run.go @@ -66,14 +66,14 @@ func runFlags(flags *pflag.FlagSet) { } func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, 
entities.TunnelMode}, Command: runCommand, }) flags := runCommand.Flags() runFlags(flags) registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, Command: containerRunCommand, Parent: containerCmd, }) diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go index ce78d24ed..751fec65f 100644 --- a/cmd/podman/containers/start.go +++ b/cmd/podman/containers/start.go @@ -53,14 +53,14 @@ func startFlags(flags *pflag.FlagSet) { } func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, Command: startCommand, }) flags := startCommand.Flags() startFlags(flags) registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, Command: containerStartCommand, Parent: containerCmd, }) diff --git a/cmd/podman/system/events.go b/cmd/podman/system/events.go index 6aae62dc0..27e80138e 100644 --- a/cmd/podman/system/events.go +++ b/cmd/podman/system/events.go @@ -5,6 +5,7 @@ import ( "context" "html/template" "os" + "strings" "github.com/containers/buildah/pkg/formats" "github.com/containers/libpod/cmd/podman/registry" @@ -54,6 +55,9 @@ func eventsCmd(cmd *cobra.Command, args []string) error { eventsError error tmpl *template.Template ) + if strings.Join(strings.Fields(eventFormat), "") == "{{json.}}" { + eventFormat = formats.JSONString + } if eventFormat != formats.JSONString { tmpl, err = template.New("events").Parse(eventFormat) if err != nil { diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index c0dd8cfc5..cc5a3ffa7 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -63,11 +63,12 @@ CIRRUS_BUILD_ID=${CIRRUS_BUILD_ID:-$RANDOM$(date +%s)} # must be short and uniq PACKER_VER="1.4.2" # CSV of cache-image names to build (see $PACKER_BASE/libpod_images.json) -# Base-images rarely change, define them here so they're out of the way. 
-export PACKER_BUILDS="${PACKER_BUILDS:-ubuntu-18,ubuntu-19,fedora-32,fedora-31}" -# Manually produced base-image names (see $SCRIPT_BASE/README.md) -export UBUNTU_BASE_IMAGE="ubuntu-1910-eoan-v20200211" -export PRIOR_UBUNTU_BASE_IMAGE="ubuntu-1804-bionic-v20200218" +# List of cache imaes to build for 'CI:IMG' mode via build_vm_images.sh +# Exists to support manual single-image building in case of emergency +export PACKER_BUILDS="${PACKER_BUILDS:-ubuntu-20,ubuntu-19,fedora-32,fedora-31}" +# Google cloud provides these, we just make copies (see $SCRIPT_BASE/README.md) for use +export UBUNTU_BASE_IMAGE="ubuntu-2004-focal-v20200506" +export PRIOR_UBUNTU_BASE_IMAGE="ubuntu-1910-eoan-v20200211" # Manually produced base-image names (see $SCRIPT_BASE/README.md) export FEDORA_BASE_IMAGE="fedora-cloud-base-32-1-6-1588257430" export PRIOR_FEDORA_BASE_IMAGE="fedora-cloud-base-31-1-9-1588257430" diff --git a/contrib/cirrus/packer/libpod_images.yml b/contrib/cirrus/packer/libpod_images.yml index e33ad775e..754626a2e 100644 --- a/contrib/cirrus/packer/libpod_images.yml +++ b/contrib/cirrus/packer/libpod_images.yml @@ -29,7 +29,7 @@ sensitive-variables: builders: # v----- is a YAML anchor, allows referencing this object by name (below) - &gce_hosted_image - name: 'ubuntu-19' + name: 'ubuntu-20' type: 'googlecompute' image_name: '{{build_name}}{{user `BUILT_IMAGE_SUFFIX`}}' image_family: '{{build_name}}-cache' @@ -46,7 +46,7 @@ builders: # v----- is a YAML alias, allows partial re-use of the anchor object - <<: *gce_hosted_image - name: 'ubuntu-18' + name: 'ubuntu-19' source_image: '{{user `PRIOR_UBUNTU_BASE_IMAGE`}}' source_image_family: 'prior-ubuntu-base' diff --git a/contrib/cirrus/packer/ubuntu_packaging.sh b/contrib/cirrus/packer/ubuntu_packaging.sh index b57bc95e9..fd0280230 100644 --- a/contrib/cirrus/packer/ubuntu_packaging.sh +++ b/contrib/cirrus/packer/ubuntu_packaging.sh @@ -26,12 +26,6 @@ source /usr/share/automation/environment $LILTO ooe.sh $SUDOAPTADD ppa:criu/ppa -# Install newer version of golang -if [[ "$OS_RELEASE_VER" -eq "18" ]] -then - $LILTO ooe.sh $SUDOAPTADD ppa:longsleep/golang-backports -fi - echo "Configuring/Instaling deps from Open build server" VERSION_ID=$(source /etc/os-release; echo $VERSION_ID) echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_$VERSION_ID/ /" \ @@ -45,7 +39,9 @@ INSTALL_PACKAGES=(\ autoconf automake bash-completion + bats bison + btrfs-progs build-essential buildah bzip2 @@ -60,6 +56,7 @@ INSTALL_PACKAGES=(\ e2fslibs-dev emacs-nox file + fuse3 gawk gcc gettext @@ -71,11 +68,13 @@ INSTALL_PACKAGES=(\ jq libaio-dev libapparmor-dev + libbtrfs-dev libcap-dev libdevmapper-dev libdevmapper1.02.1 libfuse-dev libfuse2 + libfuse3-dev libglib2.0-dev libgpgme11-dev liblzma-dev @@ -99,8 +98,6 @@ INSTALL_PACKAGES=(\ podman protobuf-c-compiler protobuf-compiler - python-future - python-minimal python-protobuf python3-dateutil python3-pip @@ -118,29 +115,16 @@ INSTALL_PACKAGES=(\ vim wget xz-utils - yum-utils zip zlib1g-dev ) -if [[ $OS_RELEASE_VER -ge 19 ]] -then - INSTALL_PACKAGES+=(\ - bats - btrfs-progs - fuse3 - libbtrfs-dev - libfuse3-dev - ) -else - echo "Downloading version of bats with fix for a \$IFS related bug in 'run' command" - cd /tmp - BATS_URL='http://launchpadlibrarian.net/438140887/bats_1.1.0+git104-g1c83a1b-1_all.deb' - curl -L -O "$BATS_URL" - cd - +# These aren't resolvable on Ubuntu 20 +if [[ "$OS_RELEASE_VER" -le 19 ]]; then INSTALL_PACKAGES+=(\ - /tmp/$(basename $BATS_URL) - btrfs-tools + 
python-future + python-minimal + yum-utils ) fi diff --git a/docs/source/markdown/podman-events.1.md b/docs/source/markdown/podman-events.1.md index bb1923574..a05047684 100644 --- a/docs/source/markdown/podman-events.1.md +++ b/docs/source/markdown/podman-events.1.md @@ -142,7 +142,7 @@ $ sudo podman events --since 5m Show Podman events in JSON Lines format ``` -events --format json +$ podman events --format json {"ID":"683b0909d556a9c02fa8cd2b61c3531a965db42158627622d1a67b391964d519","Image":"localhost/myshdemo:latest","Name":"agitated_diffie","Status":"cleanup","Time":"2019-04-27T22:47:00.849932843-04:00","Type":"container"} {"ID":"a0f8ab051bfd43f9c5141a8a2502139707e4b38d98ac0872e57c5315381e88ad","Image":"docker.io/library/alpine:latest","Name":"friendly_tereshkova","Status":"unmount","Time":"2019-04-28T13:43:38.063017276-04:00","Type":"container"} ``` diff --git a/hack/get_ci_vm.sh b/hack/get_ci_vm.sh index 7e31c19c6..1d48f0996 100755 --- a/hack/get_ci_vm.sh +++ b/hack/get_ci_vm.sh @@ -67,13 +67,6 @@ delvm() { cleanup } -image_hints() { - _BIS=$(egrep -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' "$LIBPODROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]') - egrep '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \ - "$LIBPODROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \ - sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u -} - show_usage() { echo -e "\n${RED}ERROR: $1${NOR}" echo -e "${YEL}Usage: $(basename $0) [-m <SPECIALMODE>] [-u <ROOTLESS_USER> ] <image_name>${NOR}" @@ -90,17 +83,34 @@ show_usage() { } get_env_vars() { - python -c ' -import yaml + # Deal with both YAML and embedded shell-like substitutions in values + # if substitution fails, fall back to printing naked env. var as-is. + python3 -c ' +import yaml,re env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"] -keys=[k for k in env if "ENCRYPTED" not in str(env[k])] +dollar_env_var=re.compile(r"\$(\w+)") +dollarcurly_env_var=re.compile(r"\$\{(\w+)\}") +class ReIterKey(dict): + def __missing__(self, key): + # Cirrus-CI provides some runtime-only env. vars. 
Avoid + # breaking this hack-script if/when any are present in YAML + return "${0}".format(key) +rep=r"{\1}" # Convert env vars markup to -> str.format_map(re_iter_key) markup +out=ReIterKey() for k,v in env.items(): v=str(v) - if "ENCRYPTED" not in v and "ADD_SECOND_PARTITION" not in v: - print("{0}=\"{1}\"".format(k, v)), + if "ENCRYPTED" not in v: + out[k]=dollar_env_var.sub(rep, dollarcurly_env_var.sub(rep, v)) +for k,v in out.items(): + print("{0}=\"{1}\"".format(k, v.format_map(out))) ' } +image_hints() { + get_env_vars | fgrep '_CACHE_IMAGE_NAME' | awk -F "=" '{print $2}' +} + + parse_args(){ echo -e "$USAGE_WARNING" diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 33ff0720f..21d55bf77 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -695,7 +695,10 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID()) } - ctrDepsBkt := volDB.Bucket(volDependenciesBkt) + ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt) + if err != nil { + return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID()) + } if depExists := ctrDepsBkt.Get(ctrID); depExists == nil { if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil { return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name) @@ -890,6 +893,9 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error } ctrDepsBkt := volDB.Bucket(volDependenciesBkt) + if ctrDepsBkt == nil { + return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID()) + } if depExists := ctrDepsBkt.Get(ctrID); depExists == nil { if err := ctrDepsBkt.Delete(ctrID); err != nil { return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name) diff --git a/libpod/container_exec.go b/libpod/container_exec.go index c1ce8b724..6ad767b4b 100644 --- a/libpod/container_exec.go +++ b/libpod/container_exec.go @@ -1,7 +1,9 @@ package libpod import ( + "bufio" "io/ioutil" + "net" "os" "path/filepath" "strconv" @@ -102,7 +104,7 @@ func (e *ExecSession) Inspect() (*define.InspectExecSession, error) { } output := new(define.InspectExecSession) - output.CanRemove = e.State != define.ExecStateRunning + output.CanRemove = e.State == define.ExecStateStopped output.ContainerID = e.ContainerId if e.Config.DetachKeys != nil { output.DetachKeys = *e.Config.DetachKeys @@ -156,9 +158,6 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) { if len(config.Command) == 0 { return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session") } - if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) { - return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal") - } // Verify that we are in a good state to continue if !c.ensureState(define.ContainerStateRunning) { @@ -247,34 +246,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) - // TODO: check logic here - should we set Privileged if the container is - // privileged? 
- var capList []string - if session.Config.Privileged || c.config.Privileged { - capList = capabilities.AllCapabilities() - } - - user := c.config.User - if session.Config.User != "" { - user = session.Config.User - } - - if err := c.createExecBundle(session.ID()); err != nil { + opts, err := prepareForExec(c, session) + if err != nil { return err } - opts := new(ExecOptions) - opts.Cmd = session.Config.Command - opts.CapAdd = capList - opts.Env = session.Config.Environment - opts.Terminal = session.Config.Terminal - opts.Cwd = session.Config.WorkDir - opts.User = user - opts.Streams = streams - opts.PreserveFDs = session.Config.PreserveFDs - opts.DetachKeys = session.Config.DetachKeys - - pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts) + pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts, streams) if err != nil { return err } @@ -318,28 +295,124 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS c.lock.Lock() } - // Sync the container to pick up state changes - if err := c.syncContainer(); err != nil { + if err := writeExecExitCode(c, session.ID(), exitCode); err != nil { if lastErr != nil { logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) } - return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID()) + lastErr = err } - // Update status - // Since we did a syncContainer, the old session has been overwritten. - // Grab a fresh one from the database. - session, ok = c.state.ExecSessions[sessionID] + // Clean up after ourselves + if err := c.cleanupExecBundle(session.ID()); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} + +// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session. +func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) { + // TODO: How do we combine streams with the default streams set in the exec session? + + // The flow here is somewhat strange, because we need to determine if + // there's a terminal ASAP (for error handling). + // Until we know, assume it's true (don't add standard stream headers). + // Add a defer to ensure our invariant (HTTP session is closed) is + // maintained. + isTerminal := true + defer func() { + hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf) + }() + + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + session, ok := c.state.ExecSessions[sessionID] if !ok { - // Exec session already removed. - logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID) - return nil + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) } - session.State = define.ExecStateStopped - session.ExitCode = exitCode - session.PID = 0 + // We can now finally get the real value of isTerminal. 
+ isTerminal = session.Config.Terminal + + // Verify that we are in a good state to continue + if !c.ensureState(define.ContainerStateRunning) { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running") + } + + if session.State != define.ExecStateCreated { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) + + execOpts, err := prepareForExec(c, session) + if err != nil { + return err + } + + if streams == nil { + streams = new(HTTPAttachStreams) + streams.Stdin = session.Config.AttachStdin + streams.Stdout = session.Config.AttachStdout + streams.Stderr = session.Config.AttachStderr + } + + pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, httpCon, httpBuf, streams, cancel) + if err != nil { + return err + } + + // TODO: Investigate whether more of this can be made common with + // ExecStartAndAttach + + c.newContainerEvent(events.Exec) + logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID()) + + var lastErr error + + session.PID = pid + session.State = define.ExecStateRunning if err := c.save(); err != nil { + lastErr = err + } + + // Unlock so other processes can use the container + if !c.batched { + c.lock.Unlock() + } + + tmpErr := <-attachChan + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = tmpErr + + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + + // Lock again + if !c.batched { + c.lock.Lock() + } + + if err := writeExecExitCode(c, session.ID(), exitCode); err != nil { if lastErr != nil { logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) } @@ -357,12 +430,6 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS return lastErr } -// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session. -func (c *Container) ExecHTTPStartAndAttach(sessionID string) error { - // Will be implemented in part 2, migrating Start. - return define.ErrNotImplemented -} - // ExecStop stops an exec session in the container. // If a timeout is provided, it will be used; otherwise, the timeout will // default to the stop timeout of the container. @@ -814,3 +881,67 @@ func (c *Container) removeAllExecSessions() error { return lastErr } + +// Make an ExecOptions struct to start the OCI runtime and prepare its exec +// bundle. +func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) { + // TODO: check logic here - should we set Privileged if the container is + // privileged? 
+ var capList []string + if session.Config.Privileged || c.config.Privileged { + capList = capabilities.AllCapabilities() + } + + user := c.config.User + if session.Config.User != "" { + user = session.Config.User + } + + if err := c.createExecBundle(session.ID()); err != nil { + return nil, err + } + + opts := new(ExecOptions) + opts.Cmd = session.Config.Command + opts.CapAdd = capList + opts.Env = session.Config.Environment + opts.Terminal = session.Config.Terminal + opts.Cwd = session.Config.WorkDir + opts.User = user + opts.PreserveFDs = session.Config.PreserveFDs + opts.DetachKeys = session.Config.DetachKeys + + return opts, nil +} + +// Write an exec session's exit code to the database +func writeExecExitCode(c *Container, sessionID string, exitCode int) error { + // We can't reuse the old exec session (things may have changed from + // under use, the container was unlocked). + // So re-sync and get a fresh copy. + // If we can't do this, no point in continuing, any attempt to save + // would write garbage to the DB. + if err := c.syncContainer(); err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved { + // Container's entirely removed. We can't save status, + // but the container's entirely removed, so we don't + // need to. Exit without error. + return nil + } + return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID) + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + // Exec session already removed. + logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID) + return nil + } + + session.State = define.ExecStateStopped + session.ExitCode = exitCode + session.PID = 0 + + // Finally, save our changes. + return c.save() +} diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 3fcf687ec..909ad9851 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -1011,6 +1011,14 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { logrus.Debugf("Created container %s in OCI runtime", c.ID()) + // Remove any exec sessions leftover from a potential prior run. + if len(c.state.ExecSessions) > 0 { + if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil { + logrus.Errorf("Error removing container %s exec sessions from DB: %v", c.ID(), err) + } + c.state.ExecSessions = make(map[string]*ExecSession) + } + c.state.ExitCode = 0 c.state.Exited = false c.state.State = define.ContainerStateCreated @@ -1562,21 +1570,24 @@ func (c *Container) cleanup(ctx context.Context) error { lastError = errors.Wrapf(err, "error removing container %s network", c.ID()) } - // Unmount storage - if err := c.cleanupStorage(); err != nil { + // Remove the container from the runtime, if necessary. + // Do this *before* unmounting storage - some runtimes (e.g. Kata) + // apparently object to having storage removed while the container still + // exists. 
+ if err := c.cleanupRuntime(ctx); err != nil { if lastError != nil { - logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err) + logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err) } else { - lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID()) + lastError = err } } - // Remove the container from the runtime, if necessary - if err := c.cleanupRuntime(ctx); err != nil { + // Unmount storage + if err := c.cleanupStorage(); err != nil { if lastError != nil { - logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err) + logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err) } else { - lastError = err + lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID()) } } diff --git a/libpod/oci.go b/libpod/oci.go index 9991c5625..6b1886f80 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -61,8 +61,7 @@ type OCIRuntime interface { // the attach session to be terminated if provided via the STDIN // channel. If they are not provided, the default detach keys will be // used instead. Detach keys of "" will disable detaching via keyboard. - // The streams parameter may be passed for containers that did not - // create a terminal and will determine which streams to forward to the + // The streams parameter will determine which streams to forward to the // client. HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error // AttachResize resizes the terminal in use by the given container. @@ -71,7 +70,17 @@ type OCIRuntime interface { // ExecContainer executes a command in a running container. // Returns an int (exit code), error channel (errors from attach), and // error (errors that occurred attempting to start the exec session). - ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) + // This returns once the exec session is running - not once it has + // completed, as one might expect. The attach session will remain + // running, in a goroutine that will return via the chan error in the + // return signature. + ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) + // ExecContainerHTTP executes a command in a running container and + // attaches its standard streams to a provided hijacked HTTP session. + // Maintains the same invariants as ExecContainer (returns on session + // start, with a goroutine running in the background to handle attach). + // The HTTP attach itself maintains the same invariants as HTTPAttach. + ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) // ExecAttachResize resizes the terminal of a running exec session. Only // allowed with sessions that were created with a TTY. 
ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index d1c1a1fc2..895a67747 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -636,8 +636,7 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.Te } // ExecContainer executes a command in a running container -// TODO: Split into Create/Start/Attach/Wait -func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions) (int, chan error, error) { +func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) { if options == nil { return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") } @@ -649,178 +648,111 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") } - // create sync pipe to receive the pid - parentSyncPipe, childSyncPipe, err := newPipe() - if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") + // TODO: Should we default this to false? + // Or maybe make streams mandatory? + attachStdin := true + if streams != nil { + attachStdin = streams.AttachInput } - defer errorhandling.CloseQuiet(parentSyncPipe) - - // create start pipe to set the cgroup before running - // attachToExec is responsible for closing parentStartPipe - childStartPipe, parentStartPipe, err := newPipe() - if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = c.execOCILog(sessionID) } - // We want to make sure we close the parent{Start,Attach}Pipes if we fail - // but also don't want to close them after attach to exec is called - attachToExecCalled := false - - defer func() { - if !attachToExecCalled { - errorhandling.CloseQuiet(parentStartPipe) - } - }() - - // create the attach pipe to allow attach socket to be created before - // $RUNTIME exec starts running. This is to make sure we can capture all output - // from the process through that socket, rather than half reading the log, half attaching to the socket - // attachToExec is responsible for closing parentAttachPipe - parentAttachPipe, childAttachPipe, err := newPipe() + execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog) if err != nil { - return -1, nil, errors.Wrapf(err, "error creating socket pair") + return -1, nil, err } + // Only close sync pipe. Start and attach are consumed in the attach + // goroutine. 
defer func() { - if !attachToExecCalled { - errorhandling.CloseQuiet(parentAttachPipe) + if pipes.syncPipe != nil && !pipes.syncClosed { + errorhandling.CloseQuiet(pipes.syncPipe) + pipes.syncClosed = true } }() - childrenClosed := false - defer func() { - if !childrenClosed { - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - } + // TODO Only create if !detach + // Attach to the container before starting it + attachChan := make(chan error) + go func() { + // attachToExec is responsible for closing pipes + attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe) + close(attachChan) }() - runtimeDir, err := util.GetRuntimeDir() - if err != nil { - return -1, nil, err - } - - finalEnv := make([]string, 0, len(options.Env)) - for k, v := range options.Env { - finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v)) - } - - processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID) - if err != nil { - return -1, nil, err - } - - var ociLog string - if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { - ociLog = c.execOCILog(sessionID) + if err := execCmd.Wait(); err != nil { + return -1, nil, errors.Wrapf(err, "cannot run conmon") } - args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "") - if options.PreserveFDs > 0 { - args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...) - } + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) - for _, capability := range options.CapAdd { - args = append(args, formatRuntimeOpts("--cap", capability)...) - } + return pid, attachChan, err +} - if options.Terminal { - args = append(args, "-t") +// ExecContainerHTTP executes a new command in an existing container and +// forwards its standard streams over an attach +func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) { + if streams != nil { + if !streams.Stdin && !streams.Stdout && !streams.Stderr { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to") + } } - if options.Streams != nil && options.Streams.AttachInput { - args = append(args, "-i") + if options == nil { + return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP") } - // Append container ID and command - args = append(args, "-e") - // TODO make this optional when we can detach - args = append(args, "--exec-attach") - args = append(args, "--exec-process-spec", processFile.Name()) - - logrus.WithFields(logrus.Fields{ - "args": args, - }).Debugf("running conmon: %s", r.conmonPath) - execCmd := exec.Command(r.conmonPath, args...) - - if options.Streams != nil { - // Don't add the InputStream to the execCmd. 
Instead, the data should be passed - // through CopyDetachable - if options.Streams.AttachOutput { - execCmd.Stdout = options.Streams.OutputStream - } - if options.Streams.AttachError { - execCmd.Stderr = options.Streams.ErrorStream - } + detachString := config.DefaultDetachKeys + if options.DetachKeys != nil { + detachString = *options.DetachKeys } - - conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) + detachKeys, err := processDetachKeys(detachString) if err != nil { return -1, nil, err } - if options.PreserveFDs > 0 { - for fd := 3; fd < int(3+options.PreserveFDs); fd++ { - execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) - } + // TODO: Should we default this to false? + // Or maybe make streams mandatory? + attachStdin := true + if streams != nil { + attachStdin = streams.Stdin } - // we don't want to step on users fds they asked to preserve - // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 - execCmd.Env = r.conmonEnv - execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) - execCmd.Env = append(execCmd.Env, conmonEnv...) - - execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) - execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) - execCmd.Dir = c.execBundlePath(sessionID) - execCmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, + var ociLog string + if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON { + ociLog = ctr.execOCILog(sessionID) } - err = startCommandGivenSelinux(execCmd) - - // We don't need children pipes on the parent side - errorhandling.CloseQuiet(childSyncPipe) - errorhandling.CloseQuiet(childAttachPipe) - errorhandling.CloseQuiet(childStartPipe) - childrenClosed = true - + execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog) if err != nil { - return -1, nil, errors.Wrapf(err, "cannot start container %s", c.ID()) - } - if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil { return -1, nil, err } - if options.PreserveFDs > 0 { - for fd := 3; fd < int(3+options.PreserveFDs); fd++ { - // These fds were passed down to the runtime. Close them - // and not interfere - if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { - logrus.Debugf("unable to close file fd-%d", fd) - } + // Only close sync pipe. Start and attach are consumed in the attach + // goroutine. + defer func() { + if pipes.syncPipe != nil && !pipes.syncClosed { + errorhandling.CloseQuiet(pipes.syncPipe) + pipes.syncClosed = true } - } + }() - // TODO Only create if !detach - // Attach to the container before starting it attachChan := make(chan error) go func() { // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe) + attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel) close(attachChan) }() - attachToExecCalled = true + // Wait for conmon to succeed, when return. 
if err := execCmd.Wait(); err != nil { return -1, nil, errors.Wrapf(err, "cannot run conmon") } - pid, err := readConmonPipeData(parentSyncPipe, ociLog) + pid, err := readConmonPipeData(pipes.syncPipe, ociLog) return pid, attachChan, err } @@ -1829,3 +1761,297 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, } } + +// This contains pipes used by the exec API. +type execPipes struct { + syncPipe *os.File + syncClosed bool + startPipe *os.File + startClosed bool + attachPipe *os.File + attachClosed bool +} + +func (p *execPipes) cleanup() { + if p.syncPipe != nil && !p.syncClosed { + errorhandling.CloseQuiet(p.syncPipe) + p.syncClosed = true + } + if p.startPipe != nil && !p.startClosed { + errorhandling.CloseQuiet(p.startPipe) + p.startClosed = true + } + if p.attachPipe != nil && !p.attachClosed { + errorhandling.CloseQuiet(p.attachPipe) + p.attachClosed = true + } +} + +// Start an exec session's conmon parent from the given options. +func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) { + pipes := new(execPipes) + + if options == nil { + return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer") + } + if len(options.Cmd) == 0 { + return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute") + } + + if sessionID == "" { + return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec") + } + + // create sync pipe to receive the pid + parentSyncPipe, childSyncPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.syncPipe = parentSyncPipe + + defer func() { + if deferredErr != nil { + pipes.cleanup() + } + }() + + // create start pipe to set the cgroup before running + // attachToExec is responsible for closing parentStartPipe + childStartPipe, parentStartPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.startPipe = parentStartPipe + + // create the attach pipe to allow attach socket to be created before + // $RUNTIME exec starts running. 
This is to make sure we can capture all output + // from the process through that socket, rather than half reading the log, half attaching to the socket + // attachToExec is responsible for closing parentAttachPipe + parentAttachPipe, childAttachPipe, err := newPipe() + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating socket pair") + } + pipes.attachPipe = parentAttachPipe + + childrenClosed := false + defer func() { + if !childrenClosed { + errorhandling.CloseQuiet(childSyncPipe) + errorhandling.CloseQuiet(childAttachPipe) + errorhandling.CloseQuiet(childStartPipe) + } + }() + + runtimeDir, err := util.GetRuntimeDir() + if err != nil { + return nil, nil, err + } + + finalEnv := make([]string, 0, len(options.Env)) + for k, v := range options.Env { + finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v)) + } + + processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID) + if err != nil { + return nil, nil, err + } + + args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "") + + if options.PreserveFDs > 0 { + args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...) + } + + for _, capability := range options.CapAdd { + args = append(args, formatRuntimeOpts("--cap", capability)...) + } + + if options.Terminal { + args = append(args, "-t") + } + + if attachStdin { + args = append(args, "-i") + } + + // Append container ID and command + args = append(args, "-e") + // TODO make this optional when we can detach + args = append(args, "--exec-attach") + args = append(args, "--exec-process-spec", processFile.Name()) + + logrus.WithFields(logrus.Fields{ + "args": args, + }).Debugf("running conmon: %s", r.conmonPath) + // TODO: Need to pass this back so we can wait on it. + execCmd := exec.Command(r.conmonPath, args...) + + // TODO: This is commented because it doesn't make much sense in HTTP + // attach, and I'm not certain it does for non-HTTP attach as well. + // if streams != nil { + // // Don't add the InputStream to the execCmd. Instead, the data should be passed + // // through CopyDetachable + // if streams.AttachOutput { + // execCmd.Stdout = options.Streams.OutputStream + // } + // if streams.AttachError { + // execCmd.Stderr = options.Streams.ErrorStream + // } + // } + + conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir) + if err != nil { + return nil, nil, err + } + + if options.PreserveFDs > 0 { + for fd := 3; fd < int(3+options.PreserveFDs); fd++ { + execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))) + } + } + + // we don't want to step on users fds they asked to preserve + // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 + execCmd.Env = r.conmonEnv + execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) + execCmd.Env = append(execCmd.Env, conmonEnv...) + + execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) + execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...) 
+ execCmd.Dir = c.execBundlePath(sessionID) + execCmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + err = startCommandGivenSelinux(execCmd) + + // We don't need children pipes on the parent side + errorhandling.CloseQuiet(childSyncPipe) + errorhandling.CloseQuiet(childAttachPipe) + errorhandling.CloseQuiet(childStartPipe) + childrenClosed = true + + if err != nil { + return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID()) + } + if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil { + return nil, nil, err + } + + if options.PreserveFDs > 0 { + for fd := 3; fd < int(3+options.PreserveFDs); fd++ { + // These fds were passed down to the runtime. Close them + // and not interfere + if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil { + logrus.Debugf("unable to close file fd-%d", fd) + } + } + } + + return execCmd, pipes, nil +} + +// Attach to a container over HTTP +func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error { + if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil { + return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach") + } + + defer func() { + if !pipes.startClosed { + errorhandling.CloseQuiet(pipes.startPipe) + pipes.startClosed = true + } + if !pipes.attachClosed { + errorhandling.CloseQuiet(pipes.attachPipe) + pipes.attachClosed = true + } + }() + + logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID) + + // set up the socket path, such that it is the correct length and location for exec + sockPath, err := c.execAttachSocketPath(sessionID) + if err != nil { + return err + } + socketPath := buildSocketPath(sockPath) + + // 2: read from attachFd that the parent process has set up the console socket + if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + return err + } + + // 2: then attach + conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"}) + if err != nil { + return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath) + } + defer func() { + if err := conn.Close(); err != nil { + logrus.Errorf("unable to close socket: %q", err) + } + }() + + // Make a channel to pass errors back + errChan := make(chan error) + + attachStdout := true + attachStderr := true + attachStdin := true + if streams != nil { + attachStdout = streams.Stdout + attachStderr = streams.Stderr + attachStdin = streams.Stdin + } + + // Next, STDIN. Avoid entirely if attachStdin unset. + if attachStdin { + go func() { + logrus.Debugf("Beginning STDIN copy") + _, err := utils.CopyDetachable(conn, httpBuf, detachKeys) + logrus.Debugf("STDIN copy completed") + errChan <- err + }() + } + + // 4: send start message to child + if err := writeConmonPipeData(pipes.startPipe); err != nil { + return err + } + + // Handle STDOUT/STDERR *after* start message is sent + go func() { + var err error + if isTerminal { + // Hack: return immediately if attachStdout not set to + // emulate Docker. + // Basically, when terminal is set, STDERR goes nowhere. + // Everything does over STDOUT. + // Therefore, if not attaching STDOUT - we'll never copy + // anything from here. 
+ logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID()) + if attachStdout { + err = httpAttachTerminalCopy(conn, httpBuf, c.ID()) + } + } else { + logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID()) + err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr) + } + errChan <- err + logrus.Debugf("STDOUT/ERR copy completed") + }() + + if cancel != nil { + select { + case err := <-errChan: + return err + case <-cancel: + return nil + } + } else { + var connErr error = <-errChan + return connErr + } +} diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go index 172805b0d..626740f72 100644 --- a/libpod/oci_missing.go +++ b/libpod/oci_missing.go @@ -121,7 +121,12 @@ func (r *MissingRuntime) AttachResize(ctr *Container, newSize remotecommand.Term } // ExecContainer is not available as the runtime is missing -func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) { +func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) { + return -1, nil, r.printError() +} + +// ExecContainerHTTP is not available as the runtime is missing +func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) { return -1, nil, r.printError() } diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 1d880531e..c670822a0 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -488,20 +488,25 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } + var cleanupErr error + + // Clean up network namespace, cgroups, mounts. + // Do this before we set ContainerStateRemoving, to ensure that we can + // actually remove from the OCI runtime. + if err := c.cleanup(ctx); err != nil { + cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) + } + // Set ContainerStateRemoving c.state.State = define.ContainerStateRemoving if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf(err.Error()) + } return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) } - var cleanupErr error - - // Clean up network namespace, cgroups, mounts - if err := c.cleanup(ctx); err != nil { - cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) - } - // Stop the container's storage if err := c.teardownStorage(); err != nil { if cleanupErr == nil { diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go index 52c851b8c..3c9a6fd69 100644 --- a/pkg/api/handlers/compat/containers_attach.go +++ b/pkg/api/handlers/compat/containers_attach.go @@ -13,6 +13,12 @@ import ( "k8s.io/client-go/tools/remotecommand" ) +// AttachHeader is the literal header sent for upgraded/hijacked connections for +// attach, sourced from Docker at: +// https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go +// Using literally to ensure compatibility with existing clients. 
+const AttachHeader = "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n" + func AttachContainer(w http.ResponseWriter, r *http.Request) { runtime := r.Context().Value("runtime").(*libpod.Runtime) decoder := r.Context().Value("decoder").(*schema.Decoder) @@ -106,10 +112,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) { return } - // This header string sourced from Docker: - // https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go - // Using literally to ensure compatibility with existing clients. - fmt.Fprintf(connection, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + fmt.Fprintf(connection, AttachHeader) logrus.Debugf("Hijack for attach of container %s successful", ctr.ID()) diff --git a/pkg/api/handlers/compat/containers_start.go b/pkg/api/handlers/compat/containers_start.go index 67bd287ab..9cb1492fb 100644 --- a/pkg/api/handlers/compat/containers_start.go +++ b/pkg/api/handlers/compat/containers_start.go @@ -3,11 +3,12 @@ package compat import ( "net/http" + "github.com/sirupsen/logrus" + "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/api/handlers/utils" "github.com/gorilla/schema" - "github.com/pkg/errors" ) func StartContainer(w http.ResponseWriter, r *http.Request) { @@ -23,8 +24,7 @@ func StartContainer(w http.ResponseWriter, r *http.Request) { } if len(query.DetachKeys) > 0 { // TODO - start does not support adding detach keys - utils.BadRequest(w, "detachKeys", query.DetachKeys, errors.New("the detachKeys parameter is not supported yet")) - return + logrus.Info("the detach keys parameter is not supported on start container") } runtime := r.Context().Value("runtime").(*libpod.Runtime) name := utils.GetName(r) diff --git a/pkg/api/handlers/compat/exec.go b/pkg/api/handlers/compat/exec.go index ec1a8ac96..6865a3319 100644 --- a/pkg/api/handlers/compat/exec.go +++ b/pkg/api/handlers/compat/exec.go @@ -104,4 +104,76 @@ func ExecInspectHandler(w http.ResponseWriter, r *http.Request) { } utils.WriteResponse(w, http.StatusOK, inspectOut) + + // Only for the Compat API: we want to remove sessions that were + // stopped. This is very hacky, but should suffice for now. + if !utils.IsLibpodRequest(r) && inspectOut.CanRemove { + logrus.Infof("Pruning stale exec session %s from container %s", sessionID, sessionCtr.ID()) + if err := sessionCtr.ExecRemove(sessionID, false); err != nil && errors.Cause(err) != define.ErrNoSuchExecSession { + logrus.Errorf("Error removing stale exec session %s from container %s: %v", sessionID, sessionCtr.ID(), err) + } + } +} + +// ExecStartHandler runs a given exec session. +func ExecStartHandler(w http.ResponseWriter, r *http.Request) { + runtime := r.Context().Value("runtime").(*libpod.Runtime) + + sessionID := mux.Vars(r)["id"] + + // TODO: We should read/support Tty and Detach from here. 
+ bodyParams := new(handlers.ExecStartConfig) + + if err := json.NewDecoder(r.Body).Decode(&bodyParams); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to decode parameters for %s", r.URL.String())) + return + } + if bodyParams.Detach { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Errorf("Detached exec is not yet supported")) + return + } + // TODO: Verify TTY setting against what inspect session was made with + + sessionCtr, err := runtime.GetExecSessionContainer(sessionID) + if err != nil { + utils.Error(w, fmt.Sprintf("No such exec session: %s", sessionID), http.StatusNotFound, err) + return + } + + logrus.Debugf("Starting exec session %s of container %s", sessionID, sessionCtr.ID()) + + state, err := sessionCtr.State() + if err != nil { + utils.InternalServerError(w, err) + return + } + if state != define.ContainerStateRunning { + utils.Error(w, http.StatusText(http.StatusConflict), http.StatusConflict, errors.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String())) + return + } + + // Hijack the connection + hijacker, ok := w.(http.Hijacker) + if !ok { + utils.InternalServerError(w, errors.Errorf("unable to hijack connection")) + return + } + + connection, buffer, err := hijacker.Hijack() + if err != nil { + utils.InternalServerError(w, errors.Wrapf(err, "error hijacking connection")) + return + } + + fmt.Fprintf(connection, AttachHeader) + + logrus.Debugf("Hijack for attach of container %s exec session %s successful", sessionCtr.ID(), sessionID) + + if err := sessionCtr.ExecHTTPStartAndAttach(sessionID, connection, buffer, nil, nil, nil); err != nil { + logrus.Errorf("Error attaching to container %s exec session %s: %v", sessionCtr.ID(), sessionID, err) + } + + logrus.Debugf("Attach for container %s exec session %s completed successfully", sessionCtr.ID(), sessionID) } diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go index 2075d29df..d8cdd9caf 100644 --- a/pkg/api/handlers/types.go +++ b/pkg/api/handlers/types.go @@ -170,6 +170,11 @@ type ExecCreateResponse struct { docker.IDResponse } +type ExecStartConfig struct { + Detach bool `json:"Detach"` + Tty bool `json:"Tty"` +} + func ImageToImageSummary(l *libpodImage.Image) (*entities.ImageSummary, error) { containers, err := l.Containers() if err != nil { diff --git a/pkg/api/server/register_exec.go b/pkg/api/server/register_exec.go index 71fb50307..19b7e2fcd 100644 --- a/pkg/api/server/register_exec.go +++ b/pkg/api/server/register_exec.go @@ -97,10 +97,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // properties: // Detach: // type: boolean - // description: Detach from the command + // description: Detach from the command. Not presently supported. // Tty: // type: boolean - // description: Allocate a pseudo-TTY + // description: Allocate a pseudo-TTY. Presently ignored. 
// produces: // - application/json // responses: @@ -109,12 +109,12 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 404: // $ref: "#/responses/NoSuchExecInstance" // 409: - // description: container is stopped or paused + // description: container is not running // 500: // $ref: "#/responses/InternalError" - r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost) + r.Handle(VersionedPath("/exec/{id}/start"), s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost) // Added non version path to URI to support docker non versioned paths - r.Handle("/exec/{id}/start", s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost) + r.Handle("/exec/{id}/start", s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost) // swagger:operation POST /exec/{id}/resize compat resizeExec // --- // tags: @@ -153,7 +153,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // tags: // - exec (compat) // summary: Inspect an exec instance - // description: Return low-level information about an exec instance. + // description: Return low-level information about an exec instance. Stale (stopped) exec sessions will be auto-removed after inspect runs. // parameters: // - in: path // name: id @@ -264,10 +264,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // properties: // Detach: // type: boolean - // description: Detach from the command + // description: Detach from the command. Not presently supported. // Tty: // type: boolean - // description: Allocate a pseudo-TTY + // description: Allocate a pseudo-TTY. Presently ignored. // produces: // - application/json // responses: @@ -276,10 +276,10 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error { // 404: // $ref: "#/responses/NoSuchExecInstance" // 409: - // description: container is stopped or paused + // description: container is not running. 
// 500: // $ref: "#/responses/InternalError" - r.Handle(VersionedPath("/libpod/exec/{id}/start"), s.APIHandler(compat.UnsupportedHandler)).Methods(http.MethodPost) + r.Handle(VersionedPath("/libpod/exec/{id}/start"), s.APIHandler(compat.ExecStartHandler)).Methods(http.MethodPost) // swagger:operation POST /libpod/exec/{id}/resize libpod libpodResizeExec // --- // tags: diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go index 0e8d68b7e..36f939779 100644 --- a/pkg/api/server/register_images.go +++ b/pkg/api/server/register_images.go @@ -854,7 +854,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // 500: // $ref: '#/responses/InternalError' r.Handle(VersionedPath("/libpod/images/remove"), s.APIHandler(libpod.ImagesBatchRemove)).Methods(http.MethodDelete) - // swagger:operation DELETE /libpod/images/{name:.*}/remove libpod libpodRemoveImage + // swagger:operation DELETE /libpod/images/{name:.*} libpod libpodRemoveImage // --- // tags: // - images @@ -883,7 +883,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error { // $ref: '#/responses/ConflictError' // 500: // $ref: '#/responses/InternalError' - r.Handle(VersionedPath("/libpod/images/{name:.*}/remove"), s.APIHandler(libpod.ImagesRemove)).Methods(http.MethodDelete) + r.Handle(VersionedPath("/libpod/images/{name:.*}"), s.APIHandler(libpod.ImagesRemove)).Methods(http.MethodDelete) // swagger:operation POST /libpod/images/pull libpod libpodImagesPull // --- // tags: diff --git a/pkg/bindings/bindings.go b/pkg/bindings/bindings.go index 4b07847d1..5e2882aae 100644 --- a/pkg/bindings/bindings.go +++ b/pkg/bindings/bindings.go @@ -11,8 +11,10 @@ package bindings var ( // PTrue is a convenience variable that can be used in bindings where // a pointer to a bool (optional parameter) is required. - PTrue bool = true + pTrue = true + PTrue = &pTrue // PFalse is a convenience variable that can be used in bindings where // a pointer to a bool (optional parameter) is required. 
- PFalse bool = false + pFalse = false + PFalse = &pFalse ) diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go index da3755fc8..d83c0482c 100644 --- a/pkg/bindings/connection.go +++ b/pkg/bindings/connection.go @@ -39,6 +39,7 @@ type APIResponse struct { type Connection struct { _url *url.URL client *http.Client + conn *net.Conn } type valueKey string @@ -88,26 +89,26 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context } // Now we setup the http client to use the connection above - var client *http.Client + var connection Connection switch _url.Scheme { case "ssh": secure, err = strconv.ParseBool(_url.Query().Get("secure")) if err != nil { secure = false } - client, err = sshClient(_url, identity[0], secure) + connection, err = sshClient(_url, identity[0], secure) case "unix": if !strings.HasPrefix(uri, "unix:///") { // autofix unix://path_element vs unix:///path_element _url.Path = JoinURL(_url.Host, _url.Path) _url.Host = "" } - client, err = unixClient(_url) + connection, err = unixClient(_url) case "tcp": if !strings.HasPrefix(uri, "tcp://") { return nil, errors.New("tcp URIs should begin with tcp://") } - client, err = tcpClient(_url) + connection, err = tcpClient(_url) default: return nil, errors.Errorf("'%s' is not a supported schema", _url.Scheme) } @@ -115,22 +116,30 @@ func NewConnection(ctx context.Context, uri string, identity ...string) (context return nil, errors.Wrapf(err, "Failed to create %sClient", _url.Scheme) } - ctx = context.WithValue(ctx, clientKey, &Connection{_url, client}) + ctx = context.WithValue(ctx, clientKey, &connection) if err := pingNewConnection(ctx); err != nil { return nil, err } return ctx, nil } -func tcpClient(_url *url.URL) (*http.Client, error) { - return &http.Client{ +func tcpClient(_url *url.URL) (Connection, error) { + connection := Connection{ + _url: _url, + } + connection.client = &http.Client{ Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("tcp", _url.Host) + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + conn, err := net.Dial("tcp", _url.Host) + if c, ok := ctx.Value(clientKey).(*Connection); ok { + c.conn = &conn + } + return conn, err }, DisableCompression: true, }, - }, nil + } + return connection, nil } // pingNewConnection pings to make sure the RESTFUL service is up @@ -151,10 +160,10 @@ func pingNewConnection(ctx context.Context) error { return errors.Errorf("ping response was %q", response.StatusCode) } -func sshClient(_url *url.URL, identity string, secure bool) (*http.Client, error) { +func sshClient(_url *url.URL, identity string, secure bool) (Connection, error) { auth, err := publicKey(identity) if err != nil { - return nil, errors.Wrapf(err, "Failed to parse identity %s: %v\n", _url.String(), identity) + return Connection{}, errors.Wrapf(err, "Failed to parse identity %s: %v\n", _url.String(), identity) } callback := ssh.InsecureIgnoreHostKey() @@ -188,26 +197,39 @@ func sshClient(_url *url.URL, identity string, secure bool) (*http.Client, error }, ) if err != nil { - return nil, errors.Wrapf(err, "Connection to bastion host (%s) failed.", _url.String()) + return Connection{}, errors.Wrapf(err, "Connection to bastion host (%s) failed.", _url.String()) } - return &http.Client{ + + connection := Connection{_url: _url} + connection.client = &http.Client{ Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return bastion.Dial("unix", 
_url.Path) + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + conn, err := bastion.Dial("unix", _url.Path) + if c, ok := ctx.Value(clientKey).(*Connection); ok { + c.conn = &conn + } + return conn, err }, - }}, nil + }} + return connection, nil } -func unixClient(_url *url.URL) (*http.Client, error) { - return &http.Client{ +func unixClient(_url *url.URL) (Connection, error) { + connection := Connection{_url: _url} + connection.client = &http.Client{ Transport: &http.Transport{ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { d := net.Dialer{} - return d.DialContext(ctx, "unix", _url.Path) + conn, err := d.DialContext(ctx, "unix", _url.Path) + if c, ok := ctx.Value(clientKey).(*Connection); ok { + c.conn = &conn + } + return conn, err }, DisableCompression: true, }, - }, nil + } + return connection, nil } // DoRequest assembles the http request and returns the response @@ -232,6 +254,7 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string, if len(queryParams) > 0 { req.URL.RawQuery = queryParams.Encode() } + req = req.WithContext(context.WithValue(context.Background(), clientKey, c)) // Give the Do three chances in the case of a comm/service hiccup for i := 0; i < 3; i++ { response, err = c.client.Do(req) // nolint @@ -243,6 +266,10 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string, return &APIResponse{response, req}, err } +func (c *Connection) Write(b []byte) (int, error) { + return (*c.conn).Write(b) +} + // FiltersToString converts our typical filter format of a // map[string][]string to a query/html safe string. func FiltersToString(filters map[string][]string) (string, error) { @@ -295,8 +322,8 @@ func publicKey(path string) (ssh.AuthMethod, error) { func hostKey(host string) ssh.PublicKey { // parse OpenSSH known_hosts file // ssh or use ssh-keyscan to get initial key - known_hosts := filepath.Join(homedir.HomeDir(), ".ssh", "known_hosts") - fd, err := os.Open(known_hosts) + knownHosts := filepath.Join(homedir.HomeDir(), ".ssh", "known_hosts") + fd, err := os.Open(knownHosts) if err != nil { logrus.Error(err) return nil diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go index de7b792b4..b77ef208d 100644 --- a/pkg/bindings/containers/containers.go +++ b/pkg/bindings/containers/containers.go @@ -15,6 +15,7 @@ import ( "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/domain/entities" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -341,12 +342,18 @@ func ContainerInit(ctx context.Context, nameOrID string) error { } // Attach attaches to a running container -func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stream *bool, stdin *bool, stdout io.Writer, stderr io.Writer) error { +func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stream *bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { conn, err := bindings.GetClient(ctx) if err != nil { return err } + // Do we need to wire in stdin? 
+ ctnr, err := Inspect(ctx, nameOrId, bindings.PFalse) + if err != nil { + return err + } + params := url.Values{} if detachKeys != nil { params.Add("detachKeys", *detachKeys) @@ -357,7 +364,7 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre if stream != nil { params.Add("stream", fmt.Sprintf("%t", *stream)) } - if stdin != nil && *stdin { + if stdin != nil { params.Add("stdin", "true") } if stdout != nil { @@ -373,11 +380,23 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre } defer response.Body.Close() - ctype := response.Header.Get("Content-Type") - upgrade := response.Header.Get("Connection") + if stdin != nil { + go func() { + _, err := io.Copy(conn, stdin) + if err != nil { + logrus.Error("failed to write input to service: " + err.Error()) + } + }() + } buffer := make([]byte, 1024) - if ctype == "application/vnd.docker.raw-stream" && upgrade == "Upgrade" { + if ctnr.Config.Tty { + // If not multiplex'ed, read from server and write to stdout + _, err := io.Copy(stdout, response.Body) + if err != nil { + return err + } + } else { for { // Read multiplexed channels and write to appropriate stream fd, l, err := DemuxHeader(response.Body, buffer) @@ -396,30 +415,27 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre } switch { - case fd == 0 && stdin != nil && *stdin: - stdout.Write(frame) + case fd == 0 && stdin != nil: + _, err := stdout.Write(frame[0:l]) + if err != nil { + return err + } case fd == 1 && stdout != nil: - stdout.Write(frame) + _, err := stdout.Write(frame[0:l]) + if err != nil { + return err + } case fd == 2 && stderr != nil: - stderr.Write(frame) + _, err := stderr.Write(frame[0:l]) + if err != nil { + return err + } case fd == 3: return fmt.Errorf("error from daemon in stream: %s", frame) default: return fmt.Errorf("unrecognized input header: %d", fd) } } - } else { - // If not multiplex'ed from server just dump stream to stdout - for { - _, err := response.Body.Read(buffer) - if err != nil { - if !errors.Is(err, io.EOF) { - return err - } - break - } - stdout.Write(buffer) - } } return err } diff --git a/pkg/bindings/images/rm.go b/pkg/bindings/images/rm.go index e3b5590df..05aa3f9ca 100644 --- a/pkg/bindings/images/rm.go +++ b/pkg/bindings/images/rm.go @@ -52,7 +52,7 @@ func Remove(ctx context.Context, nameOrID string, force bool) (*entities.ImageRe params := url.Values{} params.Set("force", strconv.FormatBool(force)) - response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s/remove", params, nameOrID) + response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s", params, nameOrID) if err != nil { return nil, err } diff --git a/pkg/bindings/test/attach_test.go b/pkg/bindings/test/attach_test.go index 8e89ff8ff..906bd2950 100644 --- a/pkg/bindings/test/attach_test.go +++ b/pkg/bindings/test/attach_test.go @@ -2,10 +2,13 @@ package test_bindings import ( "bytes" + "fmt" "time" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/bindings/containers" + "github.com/containers/libpod/pkg/specgen" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gexec" @@ -31,7 +34,7 @@ var _ = Describe("Podman containers attach", func() { bt.cleanup() }) - It("attach", func() { + It("can run top in container", func() { name := "TopAttachTest" id, err := bt.RunTopContainer(&name, nil, nil) Expect(err).ShouldNot(HaveOccurred()) @@ -51,7 +54,7 @@ var _ = Describe("Podman containers attach", func() { go func() { defer GinkgoRecover() - err := containers.Attach(bt.conn, id, nil, &bindings.PTrue, &bindings.PTrue, &bindings.PTrue, stdout, stderr) + err := containers.Attach(bt.conn, id, nil, bindings.PTrue, bindings.PTrue, nil, stdout, stderr) Expect(err).ShouldNot(HaveOccurred()) }() @@ -60,4 +63,48 @@ var _ = Describe("Podman containers attach", func() { // First character/First line of top output Expect(stdout.String()).Should(ContainSubstring("Mem: ")) }) + + It("can echo data via cat in container", func() { + s := specgen.NewSpecGenerator(alpine.name, false) + s.Name = "CatAttachTest" + s.Terminal = true + s.Command = []string{"/bin/cat"} + ctnr, err := containers.CreateWithSpec(bt.conn, s) + Expect(err).ShouldNot(HaveOccurred()) + + err = containers.Start(bt.conn, ctnr.ID, nil) + Expect(err).ShouldNot(HaveOccurred()) + + wait := define.ContainerStateRunning + _, err = containers.Wait(bt.conn, ctnr.ID, &wait) + Expect(err).ShouldNot(HaveOccurred()) + + tickTock := time.NewTimer(2 * time.Second) + go func() { + <-tickTock.C + timeout := uint(5) + err := containers.Stop(bt.conn, ctnr.ID, &timeout) + if err != nil { + GinkgoWriter.Write([]byte(err.Error())) + } + }() + + msg := "Hello, World" + stdin := &bytes.Buffer{} + stdin.WriteString(msg + "\n") + + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + go func() { + defer GinkgoRecover() + + err := containers.Attach(bt.conn, ctnr.ID, nil, bindings.PFalse, bindings.PTrue, stdin, stdout, stderr) + Expect(err).ShouldNot(HaveOccurred()) + }() + + time.Sleep(5 * time.Second) + // Tty==true so we get echo'ed stdin + expected output + Expect(stdout.String()).Should(Equal(fmt.Sprintf("%[1]s\r\n%[1]s\r\n", msg))) + Expect(stderr.String()).Should(BeEmpty()) + }) }) diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go index d130c146a..f725d1cf2 100644 --- a/pkg/bindings/test/containers_test.go +++ b/pkg/bindings/test/containers_test.go @@ -56,7 +56,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by name", func() { // Pausing by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -70,7 +70,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a running container by id", func() { // Pausing by id should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -84,7 +84,7 @@ var _ = Describe("Podman containers ", func() { It("podman unpause a running container by name", func() { // Unpausing by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -100,7 +100,7 @@ var _ = Describe("Podman containers ", func() { 
It("podman unpause a running container by ID", func() { // Unpausing by ID should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Pause by name err = containers.Pause(bt.conn, name) @@ -119,7 +119,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a paused container by name", func() { // Pausing a paused container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -132,7 +132,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a paused container by id", func() { // Pausing a paused container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -145,7 +145,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by name", func() { // Pausing a stopped container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -158,7 +158,7 @@ var _ = Describe("Podman containers ", func() { It("podman pause a stopped container by id", func() { // Pausing a stopped container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, cid, nil) Expect(err).To(BeNil()) @@ -171,11 +171,11 @@ var _ = Describe("Podman containers ", func() { It("podman remove a paused container by id without force", func() { // Removing a paused container without force should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(bt.conn, cid, &bindings.PFalse, &bindings.PFalse) + err = containers.Remove(bt.conn, cid, bindings.PFalse, bindings.PFalse) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -192,18 +192,18 @@ var _ = Describe("Podman containers ", func() { // Removing a paused container with force should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) - err = containers.Remove(bt.conn, cid, &bindings.PTrue, &bindings.PFalse) + err = containers.Remove(bt.conn, cid, bindings.PTrue, bindings.PFalse) Expect(err).To(BeNil()) }) It("podman stop a paused container by name", func() { // Stopping a paused container by name should fail var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, name) Expect(err).To(BeNil()) @@ -216,7 +216,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a 
paused container by id", func() { // Stopping a paused container by id should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Pause(bt.conn, cid) Expect(err).To(BeNil()) @@ -229,7 +229,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by name", func() { // Stopping a running container by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -243,7 +243,7 @@ var _ = Describe("Podman containers ", func() { It("podman stop a running container by ID", func() { // Stopping a running container by ID should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, cid, nil) Expect(err).To(BeNil()) @@ -326,7 +326,7 @@ var _ = Describe("Podman containers ", func() { // a container that has no healthcheck should be a 409 var name = "top" - bt.RunTopContainer(&name, &bindings.PFalse, nil) + bt.RunTopContainer(&name, bindings.PFalse, nil) _, err = containers.RunHealthCheck(bt.conn, name) Expect(err).ToNot(BeNil()) code, _ = bindings.CheckResponseCode(err) @@ -373,7 +373,7 @@ var _ = Describe("Podman containers ", func() { _, err = containers.Wait(bt.conn, r.ID, nil) Expect(err).To(BeNil()) - opts := containers.LogOptions{Stdout: &bindings.PTrue, Follow: &bindings.PTrue} + opts := containers.LogOptions{Stdout: bindings.PTrue, Follow: bindings.PTrue} go func() { containers.Logs(bt.conn, r.ID, opts, stdoutChan, nil) }() @@ -385,7 +385,7 @@ var _ = Describe("Podman containers ", func() { It("podman top", func() { var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // By name @@ -423,7 +423,7 @@ var _ = Describe("Podman containers ", func() { It("podman container exists in local storage by name", func() { // Container existence check by name should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) containerExists, err := containers.Exists(bt.conn, name) Expect(err).To(BeNil()) @@ -433,7 +433,7 @@ var _ = Describe("Podman containers ", func() { It("podman container exists in local storage by ID", func() { // Container existence check by ID should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) containerExists, err := containers.Exists(bt.conn, cid) Expect(err).To(BeNil()) @@ -443,7 +443,7 @@ var _ = Describe("Podman containers ", func() { It("podman container exists in local storage by short ID", func() { // Container existence check by short ID should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) containerExists, err := containers.Exists(bt.conn, cid[0:12]) Expect(err).To(BeNil()) @@ -461,7 +461,7 @@ var _ = Describe("Podman containers ", func() { It("podman kill a running container by name with SIGINT", func() { // Killing a 
running container should work var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Kill(bt.conn, name, "SIGINT") Expect(err).To(BeNil()) @@ -472,7 +472,7 @@ var _ = Describe("Podman containers ", func() { It("podman kill a running container by ID with SIGTERM", func() { // Killing a running container by ID should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Kill(bt.conn, cid, "SIGTERM") Expect(err).To(BeNil()) @@ -483,7 +483,7 @@ var _ = Describe("Podman containers ", func() { It("podman kill a running container by ID with SIGKILL", func() { // Killing a running container by ID with TERM should work var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Kill(bt.conn, cid, "SIGKILL") Expect(err).To(BeNil()) @@ -492,7 +492,7 @@ var _ = Describe("Podman containers ", func() { It("podman kill a running container by bogus signal", func() { //Killing a running container by bogus signal should fail var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Kill(bt.conn, cid, "foobar") Expect(err).ToNot(BeNil()) @@ -505,9 +505,9 @@ var _ = Describe("Podman containers ", func() { var name1 = "first" var name2 = "second" var latestContainers = 1 - _, err := bt.RunTopContainer(&name1, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name1, bindings.PFalse, nil) Expect(err).To(BeNil()) - _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil) Expect(err).To(BeNil()) containerLatestList, err := containers.List(bt.conn, nil, nil, &latestContainers, nil, nil, nil) Expect(err).To(BeNil()) @@ -536,7 +536,7 @@ var _ = Describe("Podman containers ", func() { It("podman prune stopped containers", func() { // Start and stop a container to enter in exited state. var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -551,7 +551,7 @@ var _ = Describe("Podman containers ", func() { It("podman prune stopped containers with filters", func() { // Start and stop a container to enter in exited state. var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -585,7 +585,7 @@ var _ = Describe("Podman containers ", func() { It("podman prune running containers", func() { // Start the container. var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Check if the container is running. 
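The long run of mechanical changes in these test files follows from the bindings change earlier in the diff: PTrue and PFalse are now exported as *bool rather than bool, so call sites pass them directly instead of taking their address. A minimal sketch of the resulting calling convention is below; listContainers is a hypothetical stand-in for any binding that accepts an optional bool, not a function from this tree.

package main

import "fmt"

// Mirrors the pattern now used in pkg/bindings/bindings.go: the exported
// names are pointers to package-level bools.
var (
	pTrue  = true
	PTrue  = &pTrue
	pFalse = false
	PFalse = &pFalse
)

// listContainers is a hypothetical binding-style function that takes an
// optional bool; nil means the parameter was not set.
func listContainers(all *bool) {
	if all != nil && *all {
		fmt.Println("listing all containers")
		return
	}
	fmt.Println("listing running containers only")
}

func main() {
	listContainers(PTrue) // previously &bindings.PTrue when PTrue was a plain bool
	listContainers(PFalse)
	listContainers(nil) // optional parameter left unset
}

Passing nil keeps the old "parameter not set" behavior, which is why only the address-of operator in front of the convenience values disappears at each call site.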
@@ -608,7 +608,7 @@ var _ = Describe("Podman containers ", func() { It("podman inspect running container", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Inspecting running container should succeed _, err = containers.Inspect(bt.conn, name, nil) @@ -617,7 +617,7 @@ var _ = Describe("Podman containers ", func() { It("podman inspect stopped container", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) @@ -628,20 +628,20 @@ var _ = Describe("Podman containers ", func() { It("podman inspect running container with size", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) - _, err = containers.Inspect(bt.conn, name, &bindings.PTrue) + _, err = containers.Inspect(bt.conn, name, bindings.PTrue) Expect(err).To(BeNil()) }) It("podman inspect stopped container with size", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) // Inspecting stopped container with size should succeed - _, err = containers.Inspect(bt.conn, name, &bindings.PTrue) + _, err = containers.Inspect(bt.conn, name, bindings.PTrue) Expect(err).To(BeNil()) }) @@ -653,7 +653,7 @@ var _ = Describe("Podman containers ", func() { It("podman remove running container by name", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail err = containers.Remove(bt.conn, name, nil, nil) @@ -664,7 +664,7 @@ var _ = Describe("Podman containers ", func() { It("podman remove running container by ID", func() { var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail err = containers.Remove(bt.conn, cid, nil, nil) @@ -675,10 +675,10 @@ var _ = Describe("Podman containers ", func() { It("podman forcibly remove running container by name", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, name, &bindings.PTrue, nil) + err = containers.Remove(bt.conn, name, bindings.PTrue, nil) Expect(err).To(BeNil()) //code, _ := bindings.CheckResponseCode(err) //Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -686,10 +686,10 @@ var _ = Describe("Podman containers ", func() { It("podman forcibly remove running container by ID", func() { var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, cid, &bindings.PTrue, nil) + err = containers.Remove(bt.conn, cid, bindings.PTrue, nil) Expect(err).To(BeNil()) //code, _ := bindings.CheckResponseCode(err) 
//Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -697,10 +697,10 @@ var _ = Describe("Podman containers ", func() { It("podman remove running container and volume by name", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, name, nil, &bindings.PTrue) + err = containers.Remove(bt.conn, name, nil, bindings.PTrue) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -708,10 +708,10 @@ var _ = Describe("Podman containers ", func() { It("podman remove running container and volume by ID", func() { var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, cid, nil, &bindings.PTrue) + err = containers.Remove(bt.conn, cid, nil, bindings.PTrue) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -719,10 +719,10 @@ var _ = Describe("Podman containers ", func() { It("podman forcibly remove running container and volume by name", func() { var name = "top" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, name, &bindings.PTrue, &bindings.PTrue) + err = containers.Remove(bt.conn, name, bindings.PTrue, bindings.PTrue) Expect(err).To(BeNil()) //code, _ := bindings.CheckResponseCode(err) //Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) @@ -730,10 +730,10 @@ var _ = Describe("Podman containers ", func() { It("podman forcibly remove running container and volume by ID", func() { var name = "top" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) // Removing running container should fail - err = containers.Remove(bt.conn, cid, &bindings.PTrue, &bindings.PTrue) + err = containers.Remove(bt.conn, cid, bindings.PTrue, bindings.PTrue) Expect(err).To(BeNil()) //code, _ := bindings.CheckResponseCode(err) //Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) diff --git a/pkg/bindings/test/exec_test.go b/pkg/bindings/test/exec_test.go index 1ef2197b6..53b2dcb4a 100644 --- a/pkg/bindings/test/exec_test.go +++ b/pkg/bindings/test/exec_test.go @@ -33,7 +33,7 @@ var _ = Describe("Podman containers exec", func() { It("Podman exec create makes an exec session", func() { name := "testCtr" - cid, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + cid, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) execConfig := new(handlers.ExecCreateConfig) @@ -53,7 +53,7 @@ var _ = Describe("Podman containers exec", func() { It("Podman exec create with bad command fails", func() { name := "testCtr" - _, err := bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err := bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) execConfig := new(handlers.ExecCreateConfig) diff --git a/pkg/bindings/test/images_test.go b/pkg/bindings/test/images_test.go index 9c8e82149..f2a1a51e5 100644 --- 
a/pkg/bindings/test/images_test.go +++ b/pkg/bindings/test/images_test.go @@ -76,7 +76,7 @@ var _ = Describe("Podman images", func() { // Expect(data.Size).To(BeZero()) // Enabling the size parameter should result in size being populated - data, err = images.GetImage(bt.conn, alpine.name, &bindings.PTrue) + data, err = images.GetImage(bt.conn, alpine.name, bindings.PTrue) Expect(err).To(BeNil()) Expect(data.Size).To(BeNumerically(">", 0)) }) @@ -104,7 +104,7 @@ var _ = Describe("Podman images", func() { // Start a container with alpine image var top string = "top" - _, err = bt.RunTopContainer(&top, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&top, bindings.PFalse, nil) Expect(err).To(BeNil()) // we should now have a container called "top" running containerResponse, err := containers.Inspect(bt.conn, "top", nil) @@ -122,7 +122,7 @@ var _ = Describe("Podman images", func() { Expect(err).To(BeNil()) // To be extra sure, check if the previously created container // is gone as well. - _, err = containers.Inspect(bt.conn, "top", &bindings.PFalse) + _, err = containers.Inspect(bt.conn, "top", bindings.PFalse) code, _ = bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusNotFound)) @@ -182,13 +182,13 @@ var _ = Describe("Podman images", func() { // List images with a filter filters := make(map[string][]string) filters["reference"] = []string{alpine.name} - filteredImages, err := images.List(bt.conn, &bindings.PFalse, filters) + filteredImages, err := images.List(bt.conn, bindings.PFalse, filters) Expect(err).To(BeNil()) Expect(len(filteredImages)).To(BeNumerically("==", 1)) // List images with a bad filter filters["name"] = []string{alpine.name} - _, err = images.List(bt.conn, &bindings.PFalse, filters) + _, err = images.List(bt.conn, bindings.PFalse, filters) Expect(err).ToNot(BeNil()) code, _ := bindings.CheckResponseCode(err) Expect(code).To(BeNumerically("==", http.StatusInternalServerError)) diff --git a/pkg/bindings/test/pods_test.go b/pkg/bindings/test/pods_test.go index 49bbfa246..d8e2a5ef7 100644 --- a/pkg/bindings/test/pods_test.go +++ b/pkg/bindings/test/pods_test.go @@ -63,7 +63,7 @@ var _ = Describe("Podman pods", func() { Expect(err).To(BeNil()) // Adding an alpine container to the existing pod - _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod) + _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod) Expect(err).To(BeNil()) podSummary, err = pods.List(bt.conn, nil) // Verify no errors. @@ -93,7 +93,7 @@ var _ = Describe("Podman pods", func() { _, err = pods.Start(bt.conn, newpod) Expect(err).To(BeNil()) - _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod) + _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod) Expect(err).To(BeNil()) // Expected err with invalid filter params @@ -174,7 +174,7 @@ var _ = Describe("Podman pods", func() { Expect(code).To(BeNumerically("==", http.StatusNotFound)) // Adding an alpine container to the existing pod - _, err = bt.RunTopContainer(nil, &bindings.PTrue, &newpod) + _, err = bt.RunTopContainer(nil, bindings.PTrue, &newpod) Expect(err).To(BeNil()) // Binding needs to be modified to inspect the pod state. diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go index 76f0b074b..fb2df258b 100644 --- a/pkg/bindings/test/system_test.go +++ b/pkg/bindings/test/system_test.go @@ -65,12 +65,12 @@ var _ = Describe("Podman system", func() { Expect(err).To(BeNil()) // Start and stop a container to enter in exited state. 
var name = "top" - _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) - systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse) + systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PFalse) Expect(err).To(BeNil()) Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1)) Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1)) @@ -90,21 +90,21 @@ var _ = Describe("Podman system", func() { // Start and stop a container to enter in exited state. var name = "top" - _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) // Start container and leave in running var name2 = "top2" - _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil) Expect(err).To(BeNil()) // Adding an unused volume _, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{}) Expect(err).To(BeNil()) - systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PFalse) + systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PFalse) Expect(err).To(BeNil()) Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(1)) Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1)) @@ -124,21 +124,21 @@ var _ = Describe("Podman system", func() { // Start and stop a container to enter in exited state. var name = "top" - _, err = bt.RunTopContainer(&name, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name, bindings.PFalse, nil) Expect(err).To(BeNil()) err = containers.Stop(bt.conn, name, nil) Expect(err).To(BeNil()) // Start second container and leave in running var name2 = "top2" - _, err = bt.RunTopContainer(&name2, &bindings.PFalse, nil) + _, err = bt.RunTopContainer(&name2, bindings.PFalse, nil) Expect(err).To(BeNil()) // Adding an unused volume should work _, err = volumes.Create(bt.conn, entities.VolumeCreateOptions{}) Expect(err).To(BeNil()) - systemPruneResponse, err := system.Prune(bt.conn, &bindings.PTrue, &bindings.PTrue) + systemPruneResponse, err := system.Prune(bt.conn, bindings.PTrue, bindings.PTrue) Expect(err).To(BeNil()) Expect(len(systemPruneResponse.PodPruneReport)).To(Equal(0)) Expect(len(systemPruneResponse.ContainerPruneReport.ID)).To(Equal(1)) @@ -182,7 +182,7 @@ var _ = Describe("Podman system", func() { Expect(len(podSummary)).To(Equal(0)) // No images - imageSummary, err = images.List(bt.conn, &bindings.PTrue, nil) + imageSummary, err = images.List(bt.conn, bindings.PTrue, nil) Expect(err).To(BeNil()) Expect(len(imageSummary)).To(Equal(0)) diff --git a/pkg/bindings/test/volumes_test.go b/pkg/bindings/test/volumes_test.go index 59fe48f22..839a4c575 100644 --- a/pkg/bindings/test/volumes_test.go +++ b/pkg/bindings/test/volumes_test.go @@ -105,7 +105,7 @@ var _ = Describe("Podman volumes", func() { zero := uint(0) err = containers.Stop(connText, "vtest", &zero) Expect(err).To(BeNil()) - err = volumes.Remove(connText, vol.Name, &bindings.PTrue) + err = volumes.Remove(connText, vol.Name, bindings.PTrue) Expect(err).To(BeNil()) }) diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go index e5330e1ab..3cc4b6db1 100644 --- a/pkg/domain/entities/containers.go +++ b/pkg/domain/entities/containers.go 
@@ -170,7 +170,7 @@ type CheckpointOptions struct { IgnoreRootFS bool Keep bool Latest bool - LeaveRuninng bool + LeaveRunning bool TCPEstablished bool } diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 249e8147c..035efe575 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -434,6 +434,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ TCPEstablished: options.TCPEstablished, TargetFile: options.Export, IgnoreRootfs: options.IgnoreRootFS, + KeepRunning: options.LeaveRunning, } if options.All { diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go index 5c39b5374..dfde3a939 100644 --- a/pkg/domain/infra/abi/network.go +++ b/pkg/domain/infra/abi/network.go @@ -15,26 +15,15 @@ import ( "github.com/pkg/errors" ) -func getCNIConfDir(r *libpod.Runtime) (string, error) { - config, err := r.GetConfig() - if err != nil { - return "", err - } - configPath := config.Network.NetworkConfigDir - - if len(config.Network.NetworkConfigDir) < 1 { - configPath = network.CNIConfigDir - } - return configPath, nil -} - func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) { var reports []*entities.NetworkListReport - cniConfigPath, err := getCNIConfDir(ic.Libpod) + + config, err := ic.Libpod.GetConfig() if err != nil { return nil, err } - networks, err := network.LoadCNIConfsFromDir(cniConfigPath) + + networks, err := network.LoadCNIConfsFromDir(network.GetCNIConfDir(config)) if err != nil { return nil, err } @@ -49,8 +38,14 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri var ( rawCNINetworks []entities.NetworkInspectReport ) + + config, err := ic.Libpod.GetConfig() + if err != nil { + return nil, err + } + for _, name := range namesOrIds { - rawList, err := network.InspectNetwork(name) + rawList, err := network.InspectNetwork(config, name) if err != nil { return nil, err } @@ -61,6 +56,12 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) { var reports []*entities.NetworkRmReport + + config, err := ic.Libpod.GetConfig() + if err != nil { + return nil, err + } + for _, name := range namesOrIds { report := entities.NetworkRmReport{Name: name} containers, err := ic.Libpod.GetAllContainers() @@ -80,7 +81,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o } } } - if err := network.RemoveNetwork(name); err != nil { + if err := network.RemoveNetwork(config, name); err != nil { report.Err = err } reports = append(reports, &report) @@ -117,10 +118,10 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate // if range is provided, make sure it is "in" network if subnet.IP != nil { // if network is provided, does it conflict with existing CNI or live networks - err = network.ValidateUserNetworkIsAvailable(subnet) + err = network.ValidateUserNetworkIsAvailable(runtimeConfig, subnet) } else { // if no network is provided, figure out network - subnet, err = network.GetFreeNetwork() + subnet, err = network.GetFreeNetwork(runtimeConfig) } if err != nil { return "", err @@ -158,13 +159,13 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate return "", errors.Errorf("the ip range %s does not fall 
within the subnet range %s", options.Range.String(), subnet.String()) } } - bridgeDeviceName, err := network.GetFreeDeviceName() + bridgeDeviceName, err := network.GetFreeDeviceName(runtimeConfig) if err != nil { return "", err } if len(name) > 0 { - netNames, err := network.GetNetworkNamesFromFileSystem() + netNames, err := network.GetNetworkNamesFromFileSystem(runtimeConfig) if err != nil { return "", err } @@ -205,11 +206,7 @@ func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreate if err != nil { return "", err } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) + cniPathName := filepath.Join(network.GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name)) err = ioutil.WriteFile(cniPathName, b, 0644) return cniPathName, err } @@ -222,12 +219,18 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat if err != nil { return "", err } + + config, err := r.GetConfig() + if err != nil { + return "", err + } + // Make sure the host-device exists if !util.StringInSlice(options.MacVLAN, liveNetNames) { return "", errors.Errorf("failed to find network interface %q", options.MacVLAN) } if len(name) > 0 { - netNames, err := network.GetNetworkNamesFromFileSystem() + netNames, err := network.GetNetworkNamesFromFileSystem(config) if err != nil { return "", err } @@ -235,7 +238,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat return "", errors.Errorf("the network name %s is already used", name) } } else { - name, err = network.GetFreeDeviceName() + name, err = network.GetFreeDeviceName(config) if err != nil { return "", err } @@ -248,11 +251,7 @@ func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreat if err != nil { return "", err } - cniConfigPath, err := getCNIConfDir(r) - if err != nil { - return "", err - } - cniPathName := filepath.Join(cniConfigPath, fmt.Sprintf("%s.conflist", name)) + cniPathName := filepath.Join(network.GetCNIConfDir(config), fmt.Sprintf("%s.conflist", name)) err = ioutil.WriteFile(cniPathName, b, 0644) return cniPathName, err } diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 028e3bc5f..cebd332e3 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -8,10 +8,12 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/bindings/containers" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/specgen" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string, image string, args []string, options entities.ContainerRunlabelOptions) error { @@ -267,7 +269,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ } } for _, c := range ctrs { - report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRuninng, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export) + report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRunning, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export) if err != nil { reports = append(reports, &entities.CheckpointReport{Id: c.ID, Err: err}) } @@ 
-324,15 +326,39 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []strin } func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error { - return errors.New("not implemented") + return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr) } func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) { return 125, errors.New("not implemented") } +func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint + attachErr := make(chan error) + go func() { + err := containers.Attach(ic.ClientCxt, name, detachKeys, bindings.PFalse, bindings.PTrue, input, output, errput) + attachErr <- err + }() + + if err := containers.Start(ic.ClientCxt, name, detachKeys); err != nil { + return err + } + return <-attachErr +} + func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { - return nil, errors.New("not implemented") + var reports []*entities.ContainerStartReport + for _, name := range namesOrIds { + report := entities.ContainerStartReport{Id: name} + if options.Attach { + report.Err = startAndAttach(ic, name, &options.DetachKeys, options.Stdin, options.Stdout, options.Stderr) + reports = append(reports, &report) + return reports, nil + } + report.Err = containers.Start(ic.ClientCxt, name, &options.DetachKeys) + reports = append(reports, &report) + } + return reports, nil } func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.ContainerListOptions) ([]entities.ListContainer, error) { @@ -340,7 +366,23 @@ func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.C } func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) { - return nil, errors.New("not implemented") + if opts.Rm { + logrus.Info("the remote client does not support --rm yet") + } + con, err := containers.CreateWithSpec(ic.ClientCxt, opts.Spec) + if err != nil { + return nil, err + } + report := entities.ContainerRunReport{Id: con.ID} + // Attach + if !opts.Detach { + err = startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream) + + } else { + err = containers.Start(ic.ClientCxt, con.ID, nil) + } + report.ExitCode = define.ExitCode(err) + return &report, err } func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) { diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index 682d60d6a..09791a3b9 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -20,7 +20,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam if all && len(namesOrIds) > 0 { return nil, errors.New("cannot lookup containers and all") } - c, err := containers.List(contextWithConnection, nil, &bindings.PTrue, nil, nil, nil, &bindings.PTrue) + c, err := containers.List(contextWithConnection, nil, bindings.PTrue, nil, nil, nil, bindings.PTrue) if err != nil { return nil, err } diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index eb25dc4a3..5a849d362 100644 --- a/pkg/domain/infra/tunnel/images.go +++ 
b/pkg/domain/infra/tunnel/images.go @@ -112,7 +112,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string, func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error { // Remove all tags if none are provided if len(tags) == 0 { - newImage, err := images.GetImage(ir.ClientCxt, nameOrId, &bindings.PFalse) + newImage, err := images.GetImage(ir.ClientCxt, nameOrId, bindings.PFalse) if err != nil { return err } diff --git a/pkg/network/devices.go b/pkg/network/devices.go index 78e1a5aa5..8eac32142 100644 --- a/pkg/network/devices.go +++ b/pkg/network/devices.go @@ -4,6 +4,7 @@ import ( "fmt" "os/exec" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/pkg/util" "github.com/containers/libpod/utils" "github.com/sirupsen/logrus" @@ -11,12 +12,12 @@ import ( // GetFreeDeviceName returns a device name that is unused; used when no network // name is provided by user -func GetFreeDeviceName() (string, error) { +func GetFreeDeviceName(config *config.Config) (string, error) { var ( deviceNum uint deviceName string ) - networkNames, err := GetNetworkNamesFromFileSystem() + networkNames, err := GetNetworkNamesFromFileSystem(config) if err != nil { return "", err } @@ -24,7 +25,7 @@ func GetFreeDeviceName() (string, error) { if err != nil { return "", err } - bridgeNames, err := GetBridgeNamesFromFileSystem() + bridgeNames, err := GetBridgeNamesFromFileSystem(config) if err != nil { return "", err } diff --git a/pkg/network/files.go b/pkg/network/files.go index 116189c43..81c0e1a28 100644 --- a/pkg/network/files.go +++ b/pkg/network/files.go @@ -9,9 +9,17 @@ import ( "github.com/containernetworking/cni/libcni" "github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator" + "github.com/containers/common/pkg/config" "github.com/pkg/errors" ) +func GetCNIConfDir(config *config.Config) string { + if len(config.Network.NetworkConfigDir) < 1 { + return CNIConfigDir + } + return config.Network.NetworkConfigDir +} + // LoadCNIConfsFromDir loads all the CNI configurations from a dir func LoadCNIConfsFromDir(dir string) ([]*libcni.NetworkConfigList, error) { var configs []*libcni.NetworkConfigList @@ -33,8 +41,8 @@ func LoadCNIConfsFromDir(dir string) ([]*libcni.NetworkConfigList, error) { // GetCNIConfigPathByName finds a CNI network by name and // returns its configuration file path -func GetCNIConfigPathByName(name string) (string, error) { - files, err := libcni.ConfFiles(CNIConfigDir, []string{".conflist"}) +func GetCNIConfigPathByName(config *config.Config, name string) (string, error) { + files, err := libcni.ConfFiles(GetCNIConfDir(config), []string{".conflist"}) if err != nil { return "", err } @@ -52,8 +60,8 @@ func GetCNIConfigPathByName(name string) (string, error) { // ReadRawCNIConfByName reads the raw CNI configuration for a CNI // network by name -func ReadRawCNIConfByName(name string) ([]byte, error) { - confFile, err := GetCNIConfigPathByName(name) +func ReadRawCNIConfByName(config *config.Config, name string) ([]byte, error) { + confFile, err := GetCNIConfigPathByName(config, name) if err != nil { return nil, err } @@ -73,9 +81,10 @@ func GetCNIPlugins(list *libcni.NetworkConfigList) string { // GetNetworksFromFilesystem gets all the networks from the cni configuration // files -func GetNetworksFromFilesystem() ([]*allocator.Net, error) { +func GetNetworksFromFilesystem(config *config.Config) ([]*allocator.Net, error) { var cniNetworks []*allocator.Net - 
networks, err := LoadCNIConfsFromDir(CNIConfigDir) + + networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config)) if err != nil { return nil, err } @@ -96,9 +105,10 @@ func GetNetworksFromFilesystem() ([]*allocator.Net, error) { // GetNetworkNamesFromFileSystem gets all the names from the cni network // configuration files -func GetNetworkNamesFromFileSystem() ([]string, error) { +func GetNetworkNamesFromFileSystem(config *config.Config) ([]string, error) { var networkNames []string - networks, err := LoadCNIConfsFromDir(CNIConfigDir) + + networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config)) if err != nil { return nil, err } @@ -133,9 +143,10 @@ func GetInterfaceNameFromConfig(path string) (string, error) { // GetBridgeNamesFromFileSystem is a convenience function to get all the bridge // names from the configured networks -func GetBridgeNamesFromFileSystem() ([]string, error) { +func GetBridgeNamesFromFileSystem(config *config.Config) ([]string, error) { var bridgeNames []string - networks, err := LoadCNIConfsFromDir(CNIConfigDir) + + networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config)) if err != nil { return nil, err } diff --git a/pkg/network/network.go b/pkg/network/network.go index bb6f13579..5e9062019 100644 --- a/pkg/network/network.go +++ b/pkg/network/network.go @@ -7,6 +7,7 @@ import ( "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -56,8 +57,8 @@ func GetLiveNetworkNames() ([]string, error) { // GetFreeNetwork looks for a free network according to existing cni configuration // files and network interfaces. -func GetFreeNetwork() (*net.IPNet, error) { - networks, err := GetNetworksFromFilesystem() +func GetFreeNetwork(config *config.Config) (*net.IPNet, error) { + networks, err := GetNetworksFromFilesystem(config) if err != nil { return nil, err } @@ -131,8 +132,8 @@ func networkIntersect(n1, n2 *net.IPNet) bool { // ValidateUserNetworkIsAvailable returns via an error if a network is available // to be used -func ValidateUserNetworkIsAvailable(userNet *net.IPNet) error { - networks, err := GetNetworksFromFilesystem() +func ValidateUserNetworkIsAvailable(config *config.Config, userNet *net.IPNet) error { + networks, err := GetNetworksFromFilesystem(config) if err != nil { return err } @@ -153,8 +154,8 @@ func ValidateUserNetworkIsAvailable(userNet *net.IPNet) error { // RemoveNetwork removes a given network by name. If the network has container associated with it, that // must be handled outside the context of this. 
-func RemoveNetwork(name string) error { - cniPath, err := GetCNIConfigPathByName(name) +func RemoveNetwork(config *config.Config, name string) error { + cniPath, err := GetCNIConfigPathByName(config, name) if err != nil { return err } @@ -181,8 +182,8 @@ } // InspectNetwork reads a CNI config and returns its configuration -func InspectNetwork(name string) (map[string]interface{}, error) { - b, err := ReadRawCNIConfByName(name) +func InspectNetwork(config *config.Config, name string) (map[string]interface{}, error) { + b, err := ReadRawCNIConfByName(config, name) if err != nil { return nil, err } diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go index 0636af74c..289f23b54 100644 --- a/test/e2e/events_test.go +++ b/test/e2e/events_test.go @@ -137,5 +137,19 @@ var _ = Describe("Podman events", func() { _, exist := eventsMap["Status"] Expect(exist).To(BeTrue()) Expect(test.ExitCode()).To(BeZero()) + + test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"}) + test.WaitWithDefaultTimeout() + fmt.Println(test.OutputToStringArray()) + jsonArr = test.OutputToStringArray() + Expect(len(jsonArr)).To(Not(BeZero())) + eventsMap = make(map[string]string) + err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap) + if err != nil { + os.Exit(1) + } + _, exist = eventsMap["Status"] + Expect(exist).To(BeTrue()) + Expect(test.ExitCode()).To(BeZero()) }) })
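Taken together, the AttachHeader hijack in the compat handlers, the conn plumbing added to bindings.Connection, and the reworked containers.Attach signature let a remote client stream stdin to a container and read its output back over the same connection. The following is a condensed sketch of the flow that the new attach_test echo case exercises, not a verbatim excerpt: the socket URI and image reference are placeholders, and error handling is omitted for brevity.

package main

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/containers/libpod/libpod/define"
	"github.com/containers/libpod/pkg/bindings"
	"github.com/containers/libpod/pkg/bindings/containers"
	"github.com/containers/libpod/pkg/specgen"
)

func main() {
	// Placeholder socket URI; point this at a running podman service.
	conn, _ := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")

	// A tty container running cat, as in the new attach_test case.
	// The image reference here is a placeholder.
	s := specgen.NewSpecGenerator("docker.io/library/alpine:latest", false)
	s.Terminal = true
	s.Command = []string{"/bin/cat"}
	ctr, _ := containers.CreateWithSpec(conn, s)
	_ = containers.Start(conn, ctr.ID, nil)

	running := define.ContainerStateRunning
	_, _ = containers.Wait(conn, ctr.ID, &running)

	// Attach blocks until the container exits, so stop it after a moment,
	// the same way the test does with a timer.
	go func() {
		time.Sleep(2 * time.Second)
		timeout := uint(5)
		_ = containers.Stop(conn, ctr.ID, &timeout)
	}()

	// stdin is now an io.Reader; with a tty the raw stream comes back on stdout.
	stdin := bytes.NewBufferString("hello\n")
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	_ = containers.Attach(conn, ctr.ID, nil, bindings.PFalse, bindings.PTrue, stdin, stdout, stderr)

	fmt.Println(stdout.String())
}

Because the container was created with Terminal set, the binding copies the raw stream straight to stdout; without a tty it falls back to demultiplexing the stdout and stderr frames as before.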