-rw-r--r--  .cirrus.yml | 20
-rw-r--r--  Makefile | 17
-rw-r--r--  README.md | 2
-rw-r--r--  RELEASE_NOTES.md | 17
-rw-r--r--  cmd/podman/common/create.go | 18
-rw-r--r--  cmd/podman/common/util.go | 13
-rw-r--r--  cmd/podman/common/volumes.go | 44
-rw-r--r--  cmd/podman/containers/create.go | 16
-rw-r--r--  cmd/podman/containers/exec.go | 7
-rw-r--r--  cmd/podman/containers/run.go | 10
-rw-r--r--  cmd/podman/containers/start.go | 11
-rw-r--r--  cmd/podman/registry/remote.go | 26
-rw-r--r--  cmd/podman/system/service.go | 5
-rw-r--r--  contrib/spec/podman.spec.in | 4
-rw-r--r--  contrib/systemd/system/podman.service | 4
-rw-r--r--  docs/source/markdown/podman-create.1.md | 18
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md | 12
-rw-r--r--  docs/source/markdown/podman-run.1.md | 16
-rw-r--r--  go.mod | 4
-rw-r--r--  go.sum | 8
-rw-r--r--  libpod/boltdb_state.go | 12
-rw-r--r--  libpod/boltdb_state_internal.go | 12
-rw-r--r--  libpod/container_config.go | 125
-rw-r--r--  libpod/container_exec.go | 14
-rw-r--r--  libpod/container_internal_linux.go | 27
-rw-r--r--  libpod/define/errors.go | 16
-rw-r--r--  libpod/events/journal_linux.go | 3
-rw-r--r--  libpod/image/errors.go | 11
-rw-r--r--  libpod/image/filters.go | 20
-rw-r--r--  libpod/image/image.go | 140
-rw-r--r--  libpod/image/layer_tree.go | 222
-rw-r--r--  libpod/image/prune.go | 11
-rw-r--r--  libpod/image/pull.go | 19
-rw-r--r--  libpod/logs/log.go | 12
-rw-r--r--  libpod/logs/reversereader/reversereader.go | 4
-rw-r--r--  libpod/networking_linux.go | 91
-rw-r--r--  libpod/oci_conmon_exec_linux.go | 17
-rw-r--r--  libpod/options.go | 13
-rw-r--r--  nix/nixpkgs.json | 6
-rw-r--r--  pkg/api/handlers/compat/images_build.go | 9
-rw-r--r--  pkg/api/handlers/compat/networks.go | 7
-rw-r--r--  pkg/api/handlers/libpod/containers.go | 1
-rw-r--r--  pkg/api/handlers/libpod/images.go | 10
-rw-r--r--  pkg/api/handlers/libpod/networks.go | 6
-rw-r--r--  pkg/api/handlers/utils/errors.go | 9
-rw-r--r--  pkg/api/handlers/utils/images.go | 20
-rw-r--r--  pkg/api/server/register_images.go | 4
-rw-r--r--  pkg/api/server/register_ping.go | 9
-rw-r--r--  pkg/api/server/register_volumes.go | 2
-rw-r--r--  pkg/bindings/containers/containers.go | 2
-rw-r--r--  pkg/bindings/images/images.go | 1
-rw-r--r--  pkg/domain/entities/volumes.go | 56
-rw-r--r--  pkg/domain/infra/abi/images.go | 65
-rw-r--r--  pkg/domain/infra/abi/images_list.go | 17
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 14
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 6
-rw-r--r--  pkg/namespaces/namespaces.go | 2
-rw-r--r--  pkg/network/config.go | 5
-rw-r--r--  pkg/network/files.go | 3
-rw-r--r--  pkg/network/network.go | 3
-rw-r--r--  pkg/rootless/rootless_linux.c | 12
-rw-r--r--  pkg/rootless/rootless_linux.go | 6
-rw-r--r--  pkg/rootless/rootless_unsupported.go | 5
-rw-r--r--  pkg/specgen/generate/container_create.go | 11
-rw-r--r--  pkg/specgen/generate/namespaces.go | 4
-rw-r--r--  pkg/trust/trust.go | 4
-rw-r--r--  test/apiv2/01-basic.at | 8
-rw-r--r--  test/apiv2/35-networks.at | 8
-rw-r--r--  test/apiv2/rest_api/test_rest_v1_0_0.py | 21
-rw-r--r--  test/e2e/commit_test.go | 15
-rw-r--r--  test/e2e/exec_test.go | 3
-rw-r--r--  test/e2e/image_sign_test.go | 62
-rw-r--r--  test/e2e/logs_test.go | 8
-rw-r--r--  test/e2e/run_networking_test.go | 62
-rw-r--r--  test/e2e/run_test.go | 33
-rw-r--r--  test/e2e/run_volume_test.go | 6
-rw-r--r--  test/e2e/search_test.go | 10
-rw-r--r--  test/e2e/sign/secret-key.asc | 57
-rw-r--r--  test/e2e/start_test.go | 12
-rw-r--r--  test/endpoint/endpoint.go | 4
-rw-r--r--  test/system/001-basic.bats | 31
-rw-r--r--  test/system/030-run.bats | 9
-rw-r--r--  test/system/035-logs.bats | 2
-rw-r--r--  test/system/050-stop.bats | 7
-rw-r--r--  test/system/055-rm.bats | 2
-rw-r--r--  test/system/070-build.bats | 36
-rw-r--r--  test/system/075-exec.bats | 11
-rw-r--r--  test/system/110-history.bats | 2
-rw-r--r--  test/system/120-load.bats | 4
-rw-r--r--  test/system/130-kill.bats | 2
-rw-r--r--  test/system/140-diff.bats | 13
-rw-r--r--  test/system/160-volumes.bats | 2
-rw-r--r--  test/system/200-pod.bats | 14
-rw-r--r--  test/system/220-healthcheck.bats | 1
-rw-r--r--  test/system/400-unprivileged-access.bats | 5
-rw-r--r--  test/system/helpers.bash | 22
-rw-r--r--  test/utils/utils.go | 20
-rw-r--r--  utils/utils.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 58
-rw-r--r--  vendor/github.com/containers/buildah/.gitignore | 1
-rw-r--r--  vendor/github.com/containers/buildah/Makefile | 23
-rw-r--r--  vendor/github.com/containers/buildah/README.md | 2
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 19
-rw-r--r--  vendor/github.com/containers/buildah/digester.go | 230
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 8
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 15
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 46
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 36
-rw-r--r--  vendor/github.com/containers/buildah/install.md | 8
-rw-r--r--  vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 14
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 25
-rw-r--r--  vendor/github.com/containers/buildah/troubleshooting.md | 2
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 16
-rw-r--r--  vendor/github.com/containers/common/pkg/retry/retry.go | 87
-rw-r--r--  vendor/github.com/containers/ocicrypt/.travis.yml | 22
-rw-r--r--  vendor/github.com/containers/ocicrypt/SECURITY.md | 3
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.mod | 8
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.sum | 36
-rw-r--r--  vendor/github.com/containers/ocicrypt/gpg.go | 4
-rw-r--r--  vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go | 2
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/.travis.yml | 7
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/README.md | 8
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/pkcs7.go | 962
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/x509.go | 133
-rw-r--r--  vendor/go.mozilla.org/pkcs7/.gitignore (renamed from vendor/github.com/fullsailor/pkcs7/.gitignore) | 0
-rw-r--r--  vendor/go.mozilla.org/pkcs7/.travis.yml | 10
-rw-r--r--  vendor/go.mozilla.org/pkcs7/LICENSE (renamed from vendor/github.com/fullsailor/pkcs7/LICENSE) | 0
-rw-r--r--  vendor/go.mozilla.org/pkcs7/Makefile | 20
-rw-r--r--  vendor/go.mozilla.org/pkcs7/README.md | 69
-rw-r--r--  vendor/go.mozilla.org/pkcs7/ber.go (renamed from vendor/github.com/fullsailor/pkcs7/ber.go) | 101
-rw-r--r--  vendor/go.mozilla.org/pkcs7/decrypt.go | 177
-rw-r--r--  vendor/go.mozilla.org/pkcs7/encrypt.go | 399
-rw-r--r--  vendor/go.mozilla.org/pkcs7/go.mod | 3
-rw-r--r--  vendor/go.mozilla.org/pkcs7/pkcs7.go | 291
-rw-r--r--  vendor/go.mozilla.org/pkcs7/sign.go | 429
-rw-r--r--  vendor/go.mozilla.org/pkcs7/verify.go | 264
-rw-r--r--  vendor/modules.txt | 9
139 files changed, 3652 insertions, 1738 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 8169ba171..cd122d39f 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -760,21 +760,33 @@ success_task:
static_build_task:
depends_on:
- "gating"
+
gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
cpu: 8
memory: 12
disk: 200
- script: |
+
+ init_script: |
set -ex
setenforce 0
growpart /dev/sda 1 || true
resize2fs /dev/sda1 || true
yum -y install podman
+
+ nix_cache:
+ folder: '.cache'
+ fingerprint_script: |
+ echo "nix-v1-$(sha1sum nix/nixpkgs.json | head -c 40)"
+
+ build_script: |
+ set -ex
mkdir -p /nix
- podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix
+ mkdir -p .cache
+ mount --bind .cache /nix
+ if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ chown -Rf $(whoami) .cache
+
binaries_artifacts:
path: "result/bin/podman"
- on_failure:
- failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
diff --git a/Makefile b/Makefile
index 51c2704b7..70e4a49c7 100644
--- a/Makefile
+++ b/Makefile
@@ -353,21 +353,19 @@ remotesystem:
# Start podman server using tmp socket; loop-wait for it;
# test podman-remote; kill server, clean up tmp socket file.
# podman server spews copious unhelpful output; ignore it.
- # FIXME FIXME FIXME: remove 'exit 0' after #6538 and #6539 are fixed
- exit 0;\
rc=0;\
if timeout -v 1 true; then \
SOCK_FILE=$(shell mktemp --dry-run --tmpdir podman.XXXXXX);\
export PODMAN_SOCKET=unix:$$SOCK_FILE; \
- ./bin/podman system service --timeout=0 $$PODMAN_SOCKET &> $(if $(PODMAN_SERVER_LOG),$(PODMAN_SERVER_LOG),/dev/null) & \
+ ./bin/podman system service --timeout=0 $$PODMAN_SOCKET > $(if $(PODMAN_SERVER_LOG),$(PODMAN_SERVER_LOG),/dev/null) 2>&1 & \
retry=5;\
- while [[ $$retry -ge 0 ]]; do\
+ while [ $$retry -ge 0 ]; do\
echo Waiting for server...;\
sleep 1;\
- ./bin/podman-remote --url $$PODMAN_SOCKET info &>/dev/null && break;\
+ ./bin/podman-remote --url $$PODMAN_SOCKET info >/dev/null 2>&1 && break;\
retry=$$(expr $$retry - 1);\
done;\
- if [[ $$retry -lt 0 ]]; then\
+ if [ $$retry -lt 0 ]; then\
echo "Error: ./bin/podman system service did not come up on $$SOCK_FILE" >&2;\
exit 1;\
fi;\
@@ -572,9 +570,14 @@ endif
.PHONY: install.systemd
install.systemd: install.varlink
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR}
- # Install APIV2 services
+ # User services
+ install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.service ${DESTDIR}${USERSYSTEMDDIR}/podman-auto-update.service
+ install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${USERSYSTEMDDIR}/podman-auto-update.timer
install ${SELINUXOPT} -m 644 contrib/systemd/user/podman.socket ${DESTDIR}${USERSYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/user/podman.service ${DESTDIR}${USERSYSTEMDDIR}/podman.service
+ # System services
+ install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.service ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.service
+ install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.timer
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${SYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${SYSTEMDDIR}/podman.service
diff --git a/README.md b/README.md
index 8065e4c49..50af1bdaf 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
Podman (the POD MANager) is a tool for managing containers and images, volumes mounted into those containers, and pods made from groups of containers.
Podman is based on libpod, a library for container lifecycle management that is also contained in this repository. The libpod library provides APIs for managing containers, pods, container images, and volumes.
-* [Latest Version: 2.0.3](https://github.com/containers/podman/releases/latest)
+* [Latest Version: 2.0.4](https://github.com/containers/podman/releases/latest)
* Latest Remote client for Windows
* Latest Remote client for MacOs
* Latest Static Remote client for Linux
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 48fe68ef9..d6b0eb3dd 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,22 @@
# Release Notes
+## 2.0.4
+### Bugfixes
+- Fixed a bug where the output of `podman image search` did not populate the Description field as it was mistakenly assigned to the ID field.
+- Fixed a bug where `podman build -` and `podman build` on an HTTP target would fail.
+- Fixed a bug where rootless Podman would improperly chown the copied-up contents of anonymous volumes ([#7130](https://github.com/containers/podman/issues/7130)).
+- Fixed a bug where Podman would sometimes HTML-escape special characters in its CLI output.
+- Fixed a bug where the `podman start --attach --interactive` command would print the container ID of the container attached to when exiting ([#7068](https://github.com/containers/podman/pull/7068)).
+- Fixed a bug where `podman run --ipc=host --pid=host` would only set `--pid=host` and not `--ipc=host` ([#7100](https://github.com/containers/podman/issues/7100)).
+- Fixed a bug where the `--publish` argument to `podman run`, `podman create` and `podman pod create` would not allow binding the same container port to more than one host port ([#7062](https://github.com/containers/podman/issues/7062)).
+- Fixed a bug where incorrect arguments to `podman images --format` could cause Podman to segfault.
+- Fixed a bug where `podman rmi --force` on an image ID with more than one name and at least one container using the image would not completely remove containers using the image ([#7153](https://github.com/containers/podman/issues/7153)).
+- Fixed a bug where memory usage in bytes and memory use percentage were swapped in the output of `podman stats --format=json`.
+
+### API
+- Fixed a bug where the libpod and compat events endpoints would fail if no filters were specified ([#7078](https://github.com/containers/podman/issues/7078)).
+- Fixed a bug where the `CgroupVersion` field in responses from the compat Info endpoint was prefixed by "v" (instead of just being "1" or "2", as is documented).
+
## 2.0.3
### Features
- The `podman search` command now allows wildcards in search terms.
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index e96b6a8d6..403a1065b 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -518,21 +518,3 @@ func GetCreateFlags(cf *ContainerCLIOpts) *pflag.FlagSet {
)
return &createFlags
}
-
-func AliasFlags(_ *pflag.FlagSet, name string) pflag.NormalizedName {
- switch name {
- case "healthcheck-command":
- name = "health-cmd"
- case "healthcheck-interval":
- name = "health-interval"
- case "healthcheck-retries":
- name = "health-retries"
- case "healthcheck-start-period":
- name = "health-start-period"
- case "healthcheck-timeout":
- name = "health-timeout"
- case "net":
- name = "network"
- }
- return pflag.NormalizedName(name)
-}
diff --git a/cmd/podman/common/util.go b/cmd/podman/common/util.go
index 41432c6f0..17e779c86 100644
--- a/cmd/podman/common/util.go
+++ b/cmd/podman/common/util.go
@@ -175,12 +175,15 @@ func parseSplitPort(hostIP, hostPort *string, ctrPort string, protocol *string)
if hostIP != nil {
if *hostIP == "" {
return newPort, errors.Errorf("must provide a non-empty container host IP to publish")
+ } else if *hostIP != "0.0.0.0" {
+ // If hostIP is 0.0.0.0, leave it unset - CNI treats
+ // 0.0.0.0 and empty differently, Docker does not.
+ testIP := net.ParseIP(*hostIP)
+ if testIP == nil {
+ return newPort, errors.Errorf("cannot parse %q as an IP address", *hostIP)
+ }
+ newPort.HostIP = testIP.String()
}
- testIP := net.ParseIP(*hostIP)
- if testIP == nil {
- return newPort, errors.Errorf("cannot parse %q as an IP address", *hostIP)
- }
- newPort.HostIP = testIP.String()
}
if hostPort != nil {
if *hostPort == "" {
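The util.go hunk above changes `parseSplitPort` so that a host IP of `0.0.0.0` is left unset instead of being copied through: CNI distinguishes `0.0.0.0` from an empty address, while Docker treats them the same, so normalizing both to "bind on all host addresses" keeps the two frontends consistent. A minimal, self-contained sketch of that normalization (the standalone program and function name are illustrative, not Podman's actual helper):

```go
package main

import (
	"fmt"
	"net"
)

// normalizeHostIP sketches the logic of the hunk above: an empty value is
// rejected, 0.0.0.0 is treated as "unset" (bind on all host addresses),
// and anything else must parse as a valid IP.
func normalizeHostIP(hostIP string) (string, error) {
	if hostIP == "" {
		return "", fmt.Errorf("must provide a non-empty container host IP to publish")
	}
	if hostIP == "0.0.0.0" {
		// Leave HostIP unset; CNI treats 0.0.0.0 and "" differently.
		return "", nil
	}
	ip := net.ParseIP(hostIP)
	if ip == nil {
		return "", fmt.Errorf("cannot parse %q as an IP address", hostIP)
	}
	return ip.String(), nil
}

func main() {
	for _, in := range []string{"0.0.0.0", "127.0.0.1", "not-an-ip"} {
		out, err := normalizeHostIP(in)
		fmt.Printf("%q -> %q (err: %v)\n", in, out, err)
	}
}
```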
diff --git a/cmd/podman/common/volumes.go b/cmd/podman/common/volumes.go
index 3b8f7ec6e..20c31bd81 100644
--- a/cmd/podman/common/volumes.go
+++ b/cmd/podman/common/volumes.go
@@ -20,6 +20,8 @@ const (
TypeVolume = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs = "tmpfs"
+ // TypeDevpts is the type for creating a devpts
+ TypeDevpts = "devpts"
)
var (
@@ -197,6 +199,15 @@ func getMounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.N
return nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
+ case TypeDevpts:
+ mount, err := getDevptsMount(tokens)
+ if err != nil {
+ return nil, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination)
+ }
+ finalMounts[mount.Destination] = mount
case "volume":
volume, err := getNamedVolume(tokens)
if err != nil {
@@ -416,6 +427,39 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
return newMount, nil
}
+// Parse a single devpts mount entry from the --mount flag
+func getDevptsMount(args []string) (spec.Mount, error) {
+ newMount := spec.Mount{
+ Type: TypeDevpts,
+ Source: TypeDevpts,
+ }
+
+ var setDest bool
+
+ for _, val := range args {
+ kv := strings.Split(val, "=")
+ switch kv[0] {
+ case "target", "dst", "destination":
+ if len(kv) == 1 {
+ return newMount, errors.Wrapf(optionArgError, kv[0])
+ }
+ if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+ return newMount, err
+ }
+ newMount.Destination = filepath.Clean(kv[1])
+ setDest = true
+ default:
+ return newMount, errors.Wrapf(util.ErrBadMntOption, kv[0])
+ }
+ }
+
+ if !setDest {
+ return newMount, noDestError
+ }
+
+ return newMount, nil
+}
+
// Parse a single volume mount entry from the --mount flag.
// Note that the volume-label option for named volumes is currently NOT supported.
// TODO: add support for --volume-label
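For reference, `--mount type=devpts,destination=/dev/pts` as parsed by the new `getDevptsMount` above should end up as a plain devpts entry in the container's OCI runtime spec. A small sketch of the expected result using the standard runtime-spec types (illustrative only; Podman may add options when it finalizes the spec):

```go
package main

import (
	"encoding/json"
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Approximate mount produced for: --mount type=devpts,destination=/dev/pts
	m := spec.Mount{
		Type:        "devpts",
		Source:      "devpts",
		Destination: "/dev/pts",
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}
```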
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index dd77dc9d7..6eec93f98 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/podman/v2/cmd/podman/common"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/errorhandling"
@@ -58,7 +59,7 @@ func createFlags(flags *pflag.FlagSet) {
flags.SetInterspersed(false)
flags.AddFlagSet(common.GetCreateFlags(&cliVals))
flags.AddFlagSet(common.GetNetFlags())
- flags.SetNormalizeFunc(common.AliasFlags)
+ flags.SetNormalizeFunc(utils.AliasFlags)
if registry.IsRemote() {
_ = flags.MarkHidden("authfile")
@@ -124,7 +125,7 @@ func create(cmd *cobra.Command, args []string) error {
return err
}
- if _, err := createPodIfNecessary(s); err != nil {
+ if _, err := createPodIfNecessary(s, cliVals.Net); err != nil {
return err
}
@@ -283,7 +284,7 @@ func openCidFile(cidfile string) (*os.File, error) {
// createPodIfNecessary automatically creates a pod when requested. if the pod name
// has the form new:ID, the pod ID is created and the name in the spec generator is replaced
// with ID.
-func createPodIfNecessary(s *specgen.SpecGenerator) (*entities.PodCreateReport, error) {
+func createPodIfNecessary(s *specgen.SpecGenerator, netOpts *entities.NetOptions) (*entities.PodCreateReport, error) {
if !strings.HasPrefix(s.Pod, "new:") {
return nil, nil
}
@@ -292,11 +293,10 @@ func createPodIfNecessary(s *specgen.SpecGenerator) (*entities.PodCreateReport,
return nil, errors.Errorf("new pod name must be at least one character")
}
createOptions := entities.PodCreateOptions{
- Name: podName,
- Infra: true,
- Net: &entities.NetOptions{
- PublishPorts: s.PortMappings,
- },
+ Name: podName,
+ Infra: true,
+ Net: netOpts,
+ CreateCommand: os.Args,
}
s.Pod = podName
return registry.ContainerEngine().PodCreate(context.Background(), createOptions)
diff --git a/cmd/podman/containers/exec.go b/cmd/podman/containers/exec.go
index da450054f..e301ca588 100644
--- a/cmd/podman/containers/exec.go
+++ b/cmd/podman/containers/exec.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/domain/entities"
envLib "github.com/containers/podman/v2/pkg/env"
+ "github.com/containers/podman/v2/pkg/rootless"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -110,6 +111,12 @@ func exec(_ *cobra.Command, args []string) error {
execOpts.Envs = envLib.Join(execOpts.Envs, cliEnv)
+ for fd := 3; fd < int(3+execOpts.PreserveFDs); fd++ {
+ if !rootless.IsFdInherited(fd) {
+ return errors.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors be passed", fd)
+ }
+ }
+
if !execDetach {
streams := define.AttachStreams{}
streams.OutputStream = os.Stdout
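The new check above walks file descriptors 3 through 3+`--preserve-fds` and refuses to continue if any of them was not actually passed to the Podman process. One common way to implement such a probe is `fcntl(F_GETFD)`, which fails with `EBADF` on a descriptor that is not open; a hedged sketch (this mirrors the idea behind `rootless.IsFdInherited`, not necessarily its exact implementation):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// fdIsOpen reports whether fd is open in the current process by asking the
// kernel for its descriptor flags; EBADF means the fd was never passed in.
func fdIsOpen(fd int) bool {
	_, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0)
	return err == nil
}

func main() {
	preserveFDs := 2 // hypothetical value of --preserve-fds
	for fd := 3; fd < 3+preserveFDs; fd++ {
		if !fdIsOpen(fd) {
			fmt.Printf("file descriptor %d is not available\n", fd)
		}
	}
}
```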
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 646c52645..a84cb6814 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/podman/v2/cmd/podman/common"
"github.com/containers/podman/v2/cmd/podman/registry"
+ "github.com/containers/podman/v2/cmd/podman/utils"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/errorhandling"
@@ -58,7 +59,7 @@ func runFlags(flags *pflag.FlagSet) {
flags.SetInterspersed(false)
flags.AddFlagSet(common.GetCreateFlags(&cliVals))
flags.AddFlagSet(common.GetNetFlags())
- flags.SetNormalizeFunc(common.AliasFlags)
+ flags.SetNormalizeFunc(utils.AliasFlags)
flags.BoolVar(&runOpts.SigProxy, "sig-proxy", true, "Proxy received signals to the process")
flags.BoolVar(&runRmi, "rmi", false, "Remove container image unless used by other containers")
flags.UintVar(&runOpts.PreserveFDs, "preserve-fds", 0, "Pass a number of additional file descriptors into the container")
@@ -125,6 +126,11 @@ func run(cmd *cobra.Command, args []string) error {
if err := createInit(cmd); err != nil {
return err
}
+ for fd := 3; fd < int(3+runOpts.PreserveFDs); fd++ {
+ if !rootless.IsFdInherited(fd) {
+ return errors.Errorf("file descriptor %d is not available - the preserve-fds option requires that file descriptors be passed", fd)
+ }
+ }
imageName := args[0]
if !cliVals.RootFS {
@@ -176,7 +182,7 @@ func run(cmd *cobra.Command, args []string) error {
}
runOpts.Spec = s
- if _, err := createPodIfNecessary(s); err != nil {
+ if _, err := createPodIfNecessary(s, cliVals.Net); err != nil {
return err
}
diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go
index 05fdfc780..ccbe80317 100644
--- a/cmd/podman/containers/start.go
+++ b/cmd/podman/containers/start.go
@@ -99,12 +99,17 @@ func start(cmd *cobra.Command, args []string) error {
}
for _, r := range responses {
- if r.Err == nil && !startOptions.Attach {
- fmt.Println(r.RawInput)
+ if r.Err == nil {
+ if startOptions.Attach {
+ // When --attach is set (only one container is allowed), propagate its exit code
+ registry.SetExitCode(r.ExitCode)
+ } else {
+ fmt.Println(r.RawInput)
+ }
} else {
errs = append(errs, r.Err)
}
}
- // TODO need to understand an implement exitcodes
+
return errs.PrintErrors()
}
diff --git a/cmd/podman/registry/remote.go b/cmd/podman/registry/remote.go
index 7dbdd3824..9b7523ac0 100644
--- a/cmd/podman/registry/remote.go
+++ b/cmd/podman/registry/remote.go
@@ -5,22 +5,24 @@ import (
"sync"
"github.com/containers/podman/v2/pkg/domain/entities"
- "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
)
-var (
- // Was --remote given on command line
- remoteOverride bool
- remoteSync sync.Once
-)
+// Value for --remote given on command line
+var remoteFromCLI = struct {
+ Value bool
+ sync sync.Once
+}{}
-// IsRemote returns true if podman was built to run remote
+// IsRemote returns true if podman was built to run remote or --remote flag given on CLI
// Use in init() functions as a initialization check
func IsRemote() bool {
- remoteSync.Do(func() {
- remote := &cobra.Command{}
- remote.Flags().BoolVarP(&remoteOverride, "remote", "r", false, "")
- _ = remote.ParseFlags(os.Args)
+ remoteFromCLI.sync.Do(func() {
+ fs := pflag.NewFlagSet("remote", pflag.ContinueOnError)
+ fs.BoolVarP(&remoteFromCLI.Value, "remote", "r", false, "")
+ fs.ParseErrorsWhitelist.UnknownFlags = true
+ fs.SetInterspersed(false)
+ _ = fs.Parse(os.Args[1:])
})
- return podmanOptions.EngineMode == entities.TunnelMode || remoteOverride
+ return podmanOptions.EngineMode == entities.TunnelMode || remoteFromCLI.Value
}
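The rewritten `IsRemote` above pre-parses the command line with a throwaway pflag FlagSet so that `--remote` can be detected before cobra has registered the full command tree: unknown flags are tolerated and parsing stops at the first positional argument, so only the global flags in front of the subcommand are considered. A self-contained sketch of the same pattern (the surrounding program is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

// remoteRequested scans the leading global flags for --remote/-r without
// knowing about any other flag the real CLI defines.
func remoteRequested() bool {
	var remote bool
	fs := pflag.NewFlagSet("remote", pflag.ContinueOnError)
	fs.BoolVarP(&remote, "remote", "r", false, "")
	fs.ParseErrorsWhitelist.UnknownFlags = true // ignore flags owned by subcommands
	fs.SetInterspersed(false)                   // stop at the first positional argument
	_ = fs.Parse(os.Args[1:])
	return remote
}

func main() {
	fmt.Println("remote:", remoteRequested())
}
```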
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index 2d511f0ec..7c692b07e 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -49,7 +49,7 @@ func init() {
flags := srvCmd.Flags()
flags.Int64VarP(&srvArgs.Timeout, "time", "t", 5, "Time until the service session expires in seconds. Use 0 to disable the timeout")
- flags.BoolVar(&srvArgs.Varlink, "varlink", false, "Use legacy varlink service instead of REST")
+ flags.BoolVar(&srvArgs.Varlink, "varlink", false, "Use legacy varlink service instead of REST. Unit of --time changes from seconds to milliseconds.")
_ = flags.MarkDeprecated("varlink", "valink API is deprecated.")
flags.SetNormalizeFunc(aliasTimeoutFlag)
@@ -88,14 +88,15 @@ func service(cmd *cobra.Command, args []string) error {
opts := entities.ServiceOptions{
URI: apiURI,
- Timeout: time.Duration(srvArgs.Timeout) * time.Second,
Command: cmd,
}
if srvArgs.Varlink {
+ opts.Timeout = time.Duration(srvArgs.Timeout) * time.Millisecond
return registry.ContainerEngine().VarlinkService(registry.GetContext(), opts)
}
+ opts.Timeout = time.Duration(srvArgs.Timeout) * time.Second
return restService(opts, cmd.Flags(), registry.PodmanConfig())
}
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 1795674e3..2411eaabc 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -500,10 +500,14 @@ export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath}
%{_datadir}/zsh/site-functions/*
%{_libexecdir}/%{name}/conmon
%config(noreplace) %{_sysconfdir}/cni/net.d/87-%{name}-bridge.conflist
+%{_unitdir}/podman-auto-update.service
+%{_unitdir}/podman-auto-update.timer
%{_unitdir}/podman.service
%{_unitdir}/podman.socket
%{_usr}/lib/systemd/user/podman.service
%{_usr}/lib/systemd/user/podman.socket
+%{_usr}/lib/systemd/user/podman-auto-update.service
+%{_usr}/lib/systemd/user/podman-auto-update.timer
%if 0%{?with_devel}
%files -n libpod-devel -f devel.file-list
diff --git a/contrib/systemd/system/podman.service b/contrib/systemd/system/podman.service
index 4a63735a3..c8751168d 100644
--- a/contrib/systemd/system/podman.service
+++ b/contrib/systemd/system/podman.service
@@ -8,7 +8,3 @@ StartLimitIntervalSec=0
[Service]
Type=simple
ExecStart=/usr/bin/podman system service
-
-[Install]
-WantedBy=multi-user.target
-Also=podman.socket
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index b4456225e..5c58d59fc 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -494,7 +494,7 @@ Tune a container's memory swappiness behavior. Accepts an integer between 0 and
Attach a filesystem mount to the container
-Current supported mount TYPES are `bind`, `volume`, and `tmpfs`. <sup>[[1]](#Footnote1)</sup>
+Current supported mount TYPES are `bind`, `volume`, `tmpfs` and `devpts`. <sup>[[1]](#Footnote1)</sup>
e.g.
@@ -506,6 +506,8 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`. <sup>[[1]](#Foo
type=tmpfs,tmpfs-size=512M,destination=/path/in/container
+ type=devpts,destination=/dev/pts
+
Common Options:
· src, source: mount source spec for bind and volume. Mandatory for bind.
@@ -560,9 +562,14 @@ Valid values are:
- `ns:<path>`: path to a network namespace to join
- `private`: create a new namespace for the container (default)
- `slirp4netns[:OPTIONS,...]`: use slirp4netns to create a user network stack. This is the default for rootless containers. It is possible to specify these additional options:
- **port_handler=rootlesskit**: Use rootlesskit for port forwarding. Default.
- **port_handler=slirp4netns**: Use the slirp4netns port forwarding.
- **allow_host_loopback=true|false**: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`). Default to false.
+ - **allow_host_loopback=true|false**: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`). Default is false.
+ - **enable_ipv6=true|false**: Enable IPv6. Default is false. (Required for `outbound_addr6`).
+ - **outbound_addr=INTERFACE**: Specify the outbound interface slirp should bind to (ipv4 traffic only).
+ - **outbound_addr=IPv4**: Specify the outbound ipv4 address slirp should bind to.
+ - **outbound_addr6=INTERFACE**: Specify the outbound interface slirp should bind to (ipv6 traffic only).
+ - **outbound_addr6=IPv6**: Specify the outbound ipv6 address slirp should bind to.
+ - **port_handler=rootlesskit**: Use rootlesskit for port forwarding. Default.
+ - **port_handler=slirp4netns**: Use the slirp4netns port forwarding.
**--network-alias**=*alias*
@@ -634,7 +641,8 @@ Both hostPort and containerPort can be specified as a range of ports.
When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
(e.g., `podman run -p 1234-1236:1222-1224 --name thisWorks -t busybox`
but not `podman run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`)
-With ip: `podman run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage`
+With host IP: `podman run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage`
+If host IP is set to 0.0.0.0 or not set at all, the port will be bound on all IPs on the host.
Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
If it is not, the container port will be randomly assigned a port on the host.
Use `podman port` to see the actual mapping: `podman port CONTAINER $CONTAINERPORT`
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index 466c7e2bf..d0b1b3588 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -149,9 +149,9 @@ WantedBy=multi-user.target default.target
Podman-generated unit files include an `[Install]` section, which carries installation information for the unit. It is used by the enable and disable commands of systemctl(1) during installation.
-Once you have generated the systemd unit file, you can copy the generated systemd file to ```/usr/lib/systemd/system``` for installing as a root user and to ```$HOME/.config/systemd/user ``` for installing it as a non-root user. Enable the copied unit file or files using `systemctl enable`.
+Once you have generated the systemd unit file, you can copy the generated systemd file to ```/etc/systemd/system``` for installing as a root user and to ```$HOME/.config/systemd/user``` for installing it as a non-root user. Enable the copied unit file or files using `systemctl enable`.
-Note: Coping unit files to ```/usr/lib/systemd/system``` and enabling it marks the unit file to be automatically started at boot. And smillarly, coping a unit file to ```$HOME/.config/systemd/user ``` and enabling it marks the unit file to be automatically started on user login.
+Note: Copying a unit file to ```/etc/systemd/system``` and enabling it marks the unit file to be automatically started at boot. Similarly, copying a unit file to ```$HOME/.config/systemd/user``` and enabling it marks the unit file to be automatically started on user login.
```
@@ -162,14 +162,14 @@ $ podman generate systemd --files --name systemd-pod
# Copy all the generated files.
-$ sudo cp pod-systemd-pod.service container-great_payne.service /usr/lib/systemd/system
+$ sudo cp pod-systemd-pod.service container-great_payne.service /etc/systemd/system
$ systemctl enable pod-systemd-pod.service
-Created symlink /etc/systemd/system/multi-user.target.wants/pod-systemd-pod.service → /usr/lib/systemd/system/pod-systemd-pod.service.
-Created symlink /etc/systemd/system/default.target.wants/pod-systemd-pod.service → /usr/lib/systemd/system/pod-systemd-pod.service.
+Created symlink /etc/systemd/system/multi-user.target.wants/pod-systemd-pod.service → /etc/systemd/system/pod-systemd-pod.service.
+Created symlink /etc/systemd/system/default.target.wants/pod-systemd-pod.service → /etc/systemd/system/pod-systemd-pod.service.
$ systemctl is-enabled pod-systemd-pod.service
enabled
```
-To run the user services placed in `$HOME/.config/systemd/user/` on first login of that user, enable the service with --user flag.
+To run the user services placed in `$HOME/.config/systemd/user` on first login of that user, enable the service with --user flag.
```
$ systemctl --user enable <.service>
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 4fdb7f81b..db742e429 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -501,7 +501,7 @@ Tune a container's memory swappiness behavior. Accepts an integer between *0* an
Attach a filesystem mount to the container
-Current supported mount TYPEs are **bind**, **volume**, and **tmpfs**. <sup>[[1]](#Footnote1)</sup>
+Current supported mount TYPEs are **bind**, **volume**, **tmpfs** and **devpts**. <sup>[[1]](#Footnote1)</sup>
e.g.
@@ -513,6 +513,8 @@ Current supported mount TYPEs are **bind**, **volume**, and **tmpfs**. <sup>[[1]
type=tmpfs,tmpfs-size=512M,destination=/path/in/container
+ type=devpts,destination=/dev/pts
+
Common Options:
· src, source: mount source spec for bind and volume. Mandatory for bind.
@@ -647,6 +649,8 @@ Both hostPort and containerPort can be specified as a range of ports.
When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
+If host IP is set to 0.0.0.0 or not set at all, the port will be bound on all IPs on the host.
+
Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
If it is not, the container port will be randomly assigned a port on the host.
@@ -901,20 +905,22 @@ Ulimit options. You can use **host** to copy the current configuration from the
Sets the username or UID used and optionally the groupname or GID for the specified command.
-Without this argument the command will be run as root in the container.
+Without this argument, the command will run as the user specified in the container image. Unless overridden by a `USER` command in the Containerfile or by a value passed to this option, this user generally defaults to root.
+
+When a user namespace is not in use, the UID and GID used within the container and on the host will match. When user namespaces are in use, however, the UID and GID in the container may correspond to another UID and GID on the host. In rootless containers, for example, a user namespace is always used, and root in the container will by default correspond to the UID and GID of the user invoking Podman.
**--userns**=**auto**|**host**|**keep-id**|**container:**_id_|**ns:**_namespace_
-Set the user namespace mode for the container. It defaults to the **PODMAN_USERNS** environment variable. An empty value means user namespaces are disabled.
+Set the user namespace mode for the container. It defaults to the **PODMAN_USERNS** environment variable. An empty value ("") means user namespaces are disabled unless an explicit mapping is set with the `--uidmapping` and `--gidmapping` options.
- **auto**: automatically create a namespace. It is possible to specify other options to `auto`. The supported options are
**size=SIZE** to specify an explicit size for the automatic user namespace. e.g. `--userns=auto:size=8192`. If `size` is not specified, `auto` will guess a size for the user namespace.
**uidmapping=HOST_UID:CONTAINER_UID:SIZE** to force a UID mapping to be present in the user namespace.
**gidmapping=HOST_UID:CONTAINER_UID:SIZE** to force a GID mapping to be present in the user namespace.
-- **host**: run in the user namespace of the caller. This is the default if no user namespace options are set. The processes running in the container will have the same privileges on the host as any other process launched by the calling user.
+- **host**: run in the user namespace of the caller. The processes running in the container will have the same privileges on the host as any other process launched by the calling user (default).
- **keep-id**: creates a user namespace where the current rootless user's UID:GID are mapped to the same values in the container. This option is ignored for containers created by the root user.
- **ns**: run the container in the given existing user namespace.
-- **private**: create a new namespace for the container (default)
+- **private**: create a new namespace for the container.
- **container**: join the user namespace of the specified container.
This option is incompatible with **--gidmap**, **--uidmap**, **--subuid** and **--subgid**.
diff --git a/go.mod b/go.mod
index 8f5a53c80..a7c63b9f4 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
github.com/containernetworking/plugins v0.8.6
- github.com/containers/buildah v1.15.1-0.20200708111410-d2ea9429455d
+ github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c
github.com/containers/common v0.18.0
github.com/containers/conmon v2.0.19+incompatible
github.com/containers/image/v5 v5.5.1
@@ -62,8 +62,6 @@ require (
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
- golang.org/x/text v0.3.3 // indirect
- gopkg.in/yaml.v2 v2.3.0
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
diff --git a/go.sum b/go.sum
index 01b655388..4ea2880de 100644
--- a/go.sum
+++ b/go.sum
@@ -70,8 +70,8 @@ github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 h1:eUMd8
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6 h1:npZTLiMa4CRn6m5P9+1Dz4O1j0UeFbm8VYN6dlsw568=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containers/buildah v1.15.1-0.20200708111410-d2ea9429455d h1:HgJJn1UBFjM464NpEmgLwVje5vSF/fBYAdLLoww9HgU=
-github.com/containers/buildah v1.15.1-0.20200708111410-d2ea9429455d/go.mod h1:HUAiD1mCGPFPcIuk5zls1LElLhXo7Q3hWDwheojjyAs=
+github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c h1:+V9RQOhg1LyhyHHU33OVjO+Uan1MoVbkjufH8E/BeLU=
+github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c/go.mod h1:XVOKQHd1sP/7tFpCXIaNsUJZdTNCwVZ7YZiLnnEfrVg=
github.com/containers/common v0.15.2/go.mod h1:rhpXuGLTEKsk/xX/x0iKGHjRadMHpBd2ZiNDugwXPEM=
github.com/containers/common v0.18.0 h1:pZB6f17N5QV43TcT06gtx1lb0rxd/4StFdVhP9CtgQg=
github.com/containers/common v0.18.0/go.mod h1:H2Wqvx6wkqdzT4RcTCqIG4W0HSOZwUbbNiUTX1+VohU=
@@ -83,6 +83,8 @@ github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDpl
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
+github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
+github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA=
github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
@@ -479,6 +481,8 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index e98a6e907..2575f0e86 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -2347,11 +2347,19 @@ func (s *BoltState) AddPod(pod *Pod) error {
// Check if we already have something with the given ID and name
idExist := idsBkt.Get(podID)
if idExist != nil {
- return errors.Wrapf(define.ErrPodExists, "ID %s is in use", pod.ID())
+ err = define.ErrPodExists
+ if allPodsBkt.Get(idExist) == nil {
+ err = define.ErrCtrExists
+ }
+ return errors.Wrapf(err, "ID \"%s\" is in use", pod.ID())
}
nameExist := namesBkt.Get(podName)
if nameExist != nil {
- return errors.Wrapf(define.ErrPodExists, "name %s is in use", pod.Name())
+ err = define.ErrPodExists
+ if allPodsBkt.Get(nameExist) == nil {
+ err = define.ErrCtrExists
+ }
+ return errors.Wrapf(err, "name \"%s\" is in use", pod.Name())
}
// We are good to add the pod
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ddbd40da8..9be753d26 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -586,11 +586,19 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
// Check if we already have a container with the given ID and name
idExist := idsBucket.Get(ctrID)
if idExist != nil {
- return errors.Wrapf(define.ErrCtrExists, "ID %s is in use", ctr.ID())
+ err = define.ErrCtrExists
+ if allCtrsBucket.Get(idExist) == nil {
+ err = define.ErrPodExists
+ }
+ return errors.Wrapf(err, "ID \"%s\" is in use", ctr.ID())
}
nameExist := namesBucket.Get(ctrName)
if nameExist != nil {
- return errors.Wrapf(define.ErrCtrExists, "name %s is in use", ctr.Name())
+ err = define.ErrCtrExists
+ if allCtrsBucket.Get(nameExist) == nil {
+ err = define.ErrPodExists
+ }
+ return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
}
// No overlapping containers
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 301b867fc..5f89395c1 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -13,25 +13,52 @@ import (
// ContainerConfig contains all information that was used to create the
// container. It may not be changed once created.
-// It is stored, read-only, on disk
+// It is stored, read-only, on disk in Libpod's State.
+// Any changes will not be written back to the database, and will cause
+// inconsistencies with other Libpod instances.
type ContainerConfig struct {
+ // Spec is OCI runtime spec used to create the container. This is passed
+ // in when the container is created, but it is not the final spec used
+ // to run the container - it will be modified by Libpod to add things we
+ // manage (e.g. bind mounts for /etc/resolv.conf, named volumes, a
+ // network namespace prepared by CNI or slirp4netns) in the
+ // generateSpec() function.
Spec *spec.Spec `json:"spec"`
+ // ID is a hex-encoded 256-bit pseudorandom integer used as a unique
+ // identifier for the container. IDs are globally unique in Libpod -
+ // once an ID is in use, no other container or pod will be created with
+ // the same one until the holder of the ID has been removed.
+ // ID is generated by Libpod, and cannot be chosen or influenced by the
+ // user (except when restoring a checkpointed container).
+ // ID is guaranteed to be 64 characters long.
ID string `json:"id"`
+ // Name is a human-readable name for the container. All containers must
+ // have a non-empty name. Name may be provided when the container is
+ // created; if no name is chosen, a name will be auto-generated.
Name string `json:"name"`
- // Full ID of the pood the container belongs to
+ // Pod is the full ID of the pod the container belongs to. If the
+ // container does not belong to a pod, this will be empty.
+ // If this is not empty, a pod with this ID is guaranteed to exist in
+ // the state for the duration of this container's existence.
Pod string `json:"pod,omitempty"`
- // Namespace the container is in
+ // Namespace is the libpod Namespace the container is in.
+ // Namespaces are used to divide containers in the state.
Namespace string `json:"namespace,omitempty"`
- // ID of this container's lock
+ // LockID is the ID of this container's lock. Each container, pod, and
+ // volume is assigned a unique Lock (from one of several backends) by
+ // the libpod Runtime. This lock will belong only to this container for
+ // the duration of the container's lifetime.
LockID uint32 `json:"lockID"`
- // CreateCommand is the full command plus arguments of the process the
- // container has been created with.
+ // CreateCommand is the full command plus arguments that were used to
+ // create the container. It is shown in the output of Inspect, and may
+ // be used to recreate an identical container for automatic updates or
+ // portable systemd unit files.
CreateCommand []string `json:"CreateCommand,omitempty"`
// RawImageName is the raw and unprocessed name of the image when creating
@@ -40,10 +67,13 @@ type ContainerConfig struct {
// name and not some normalized instance of it.
RawImageName string `json:"RawImageName,omitempty"`
- // UID/GID mappings used by the storage
+ // IDMappings are UID/GID mappings used by the container's user
+ // namespace. They are used by the OCI runtime when creating the
+ // container, and by c/storage to ensure that the container's files have
+ // the appropriate owner.
IDMappings storage.IDMappingOptions `json:"idMappingsOptions,omitempty"`
- // IDs of dependency containers.
+ // Dependencies are the IDs of dependency containers.
// These containers must be started before this container is started.
Dependencies []string
@@ -59,45 +89,92 @@ type ContainerConfig struct {
// ContainerRootFSConfig is an embedded sub-config providing config info
// about the container's root fs.
type ContainerRootFSConfig struct {
- RootfsImageID string `json:"rootfsImageID,omitempty"`
+ // RootfsImageID is the ID of the image used to create the container.
+ // If the container was created from a Rootfs, this will be empty.
+ // If non-empty, Podman will create a root filesystem for the container
+ // based on an image with this ID.
+ // This conflicts with Rootfs.
+ RootfsImageID string `json:"rootfsImageID,omitempty"`
+ // RootfsImageName is the (normalized) name of the image used to create
+ // the container. If the container was created from a Rootfs, this will
+ // be empty.
RootfsImageName string `json:"rootfsImageName,omitempty"`
- // Rootfs to use for the container, this conflicts with RootfsImageID
+ // Rootfs is a directory to use as the container's root filesystem.
+ // If RootfsImageID is set, this will be empty.
+ // If this is set, Podman will not create a root filesystem for the
+ // container based on an image, and will instead use the given directory
+ // as the container's root.
+ // Conflicts with RootfsImageID.
Rootfs string `json:"rootfs,omitempty"`
- // Src path to be mounted on /dev/shm in container.
+ // ShmDir is the path to be mounted on /dev/shm in container.
+ // If not set manually at creation time, Libpod will create a tmpfs
+ // with the size specified in ShmSize and populate this with the path of
+ // said tmpfs.
ShmDir string `json:"ShmDir,omitempty"`
- // Size of the container's SHM.
+ // ShmSize is the size of the container's SHM. Only used if ShmDir was
+ // not set manually at time of creation.
ShmSize int64 `json:"shmSize"`
// Static directory for container content that will persist across
// reboot.
+ // StaticDir is a persistent directory for Libpod files that will
+ // survive system reboot. It is not part of the container's rootfs and
+ // is not mounted into the container. It will be removed when the
+ // container is removed.
+ // Usually used to store container log files, files that will be bind
+ // mounted into the container (e.g. the resolv.conf we made for the
+ // container), and other per-container content.
StaticDir string `json:"staticDir"`
- // Mounts list contains all additional mounts into the container rootfs.
- // These include the SHM mount.
+ // Mounts contains all additional mounts into the container rootfs.
+ // It is presently only used for the container's SHM directory.
// These must be unmounted before the container's rootfs is unmounted.
Mounts []string `json:"mounts,omitempty"`
- // NamedVolumes lists the named volumes to mount into the container.
+ // NamedVolumes lists the Libpod named volumes to mount into the
+ // container. Each named volume is guaranteed to exist so long as this
+ // container exists.
NamedVolumes []*ContainerNamedVolume `json:"namedVolumes,omitempty"`
// OverlayVolumes lists the overlay volumes to mount into the container.
OverlayVolumes []*ContainerOverlayVolume `json:"overlayVolumes,omitempty"`
+ // CreateWorkingDir indicates that Libpod should create the container's
+ // working directory if it does not exist. Some OCI runtimes do this by
+ // default, but others do not.
+ CreateWorkingDir bool `json:"createWorkingDir,omitempty"`
}
// ContainerSecurityConfig is an embedded sub-config providing security configuration
// to the container.
type ContainerSecurityConfig struct {
- // Whether the container is privileged
+ // Privileged is whether the container is privileged. Privileged
+ // containers have lessened security and increased access to the system.
+ // Note that this does NOT directly correspond to Podman's --privileged
+ // flag - most of the work of that flag is done in creating the OCI spec
+ // given to Libpod. This only enables a small subset of the overall
+ // operation, mostly around mounting the container image with reduced
+ // security.
Privileged bool `json:"privileged"`
- // SELinux process label for container
+ // ProcessLabel is the SELinux process label for the container.
ProcessLabel string `json:"ProcessLabel,omitempty"`
- // SELinux mount label for root filesystem
+ // MountLabel is the SELinux mount label for the container's root
+ // filesystem. Only used if the container was created from an image.
+ // If not explicitly set, an unused random MLS label will be assigned by
+ // containers/storage (but only if SELinux is enabled).
MountLabel string `json:"MountLabel,omitempty"`
- // LabelOpts are options passed in by the user to setup SELinux labels
+ // LabelOpts are options passed in by the user to setup SELinux labels.
+ // These are used by the containers/storage library.
LabelOpts []string `json:"labelopts,omitempty"`
- // User and group to use in the container
- // Can be specified by name or UID/GID
+ // User and group to use in the container. Can be specified as only user
+ // (in which case we will attempt to look up the user in the container
+ // to determine the appropriate group) or user and group separated by a
+ // colon.
+ // Can be specified by name or UID/GID.
+ // If unset, this will default to UID and GID 0 (root).
User string `json:"user,omitempty"`
- // Additional groups to add
+ // Groups are additional groups to add the container's user to. These
+ // are resolved within the container using the container's /etc/passwd.
Groups []string `json:"groups,omitempty"`
- // AddCurrentUserPasswdEntry indicates that the current user passwd entry
- // should be added to the /etc/passwd within the container
+ // AddCurrentUserPasswdEntry indicates that Libpod should ensure that
+ // the container's /etc/passwd contains an entry for the user running
+ // Libpod - mostly used in rootless containers where the user running
+ // Libpod wants to retain their UID inside the container.
AddCurrentUserPasswdEntry bool `json:"addCurrentUserPasswdEntry,omitempty"`
}
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index 08e95e6dd..bfeae0a11 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -415,6 +415,13 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, h
execOpts, err := prepareForExec(c, session)
if err != nil {
+ session.State = define.ExecStateStopped
+ session.ExitCode = define.ExecErrorCodeGeneric
+
+ if err := c.save(); err != nil {
+ logrus.Errorf("Error saving container %s exec session %s after failure to prepare: %v", c.ID(), session.ID(), err)
+ }
+
return err
}
@@ -427,6 +434,13 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, h
pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, httpCon, httpBuf, streams, cancel)
if err != nil {
+ session.State = define.ExecStateStopped
+ session.ExitCode = define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, err)
+
+ if err := c.save(); err != nil {
+ logrus.Errorf("Error saving container %s exec session %s after failure to start: %v", c.ID(), session.ID(), err)
+ }
+
return err
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 4cfe992ea..9fb9738dc 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -159,7 +159,32 @@ func (c *Container) prepare() error {
}
// Save changes to container state
- return c.save()
+ if err := c.save(); err != nil {
+ return err
+ }
+
+ // Ensure the container's working directory is created (if requested)
+ if c.config.CreateWorkingDir {
+ workdir, err := securejoin.SecureJoin(c.state.Mountpoint, c.WorkingDir())
+ if err != nil {
+ return errors.Wrapf(err, "error creating path to container %s working dir", c.ID())
+ }
+ rootUID := c.RootUID()
+ rootGID := c.RootGID()
+
+ if err := os.MkdirAll(workdir, 0755); err != nil {
+ if os.IsExist(err) {
+ return nil
+ }
+ return errors.Wrapf(err, "error creating container %s working dir", c.ID())
+ }
+
+ if err := os.Chown(workdir, rootUID, rootGID); err != nil {
+ return errors.Wrapf(err, "error chowning container %s working directory to container root", c.ID())
+ }
+ }
+
+ return nil
}
// cleanupNetwork unmounts and cleans up the container's network
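The working-directory creation added above resolves the image-supplied `WorkingDir` against the container's mountpoint with `securejoin.SecureJoin`, so a symlink inside the rootfs cannot redirect the `MkdirAll`/`Chown` onto an arbitrary host path. A minimal sketch of that path resolution (the mountpoint and workdir values are made up for illustration):

```go
package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Hypothetical values; in Libpod these come from the container state
	// and the image config respectively.
	mountpoint := "/var/lib/containers/storage/overlay/abc123/merged"
	workdir := "/app/data"

	hostPath, err := securejoin.SecureJoin(mountpoint, workdir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "secure join failed:", err)
		os.Exit(1)
	}
	// hostPath is guaranteed to remain under mountpoint even if /app or
	// /app/data is a symlink that points outside the container rootfs.
	fmt.Println(hostPath)
}
```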
diff --git a/libpod/define/errors.go b/libpod/define/errors.go
index 23d10f527..6e372eb5e 100644
--- a/libpod/define/errors.go
+++ b/libpod/define/errors.go
@@ -2,27 +2,27 @@ package define
import (
"errors"
-
- "github.com/containers/podman/v2/libpod/image"
- "github.com/containers/podman/v2/utils"
)
var (
// ErrNoSuchCtr indicates the requested container does not exist
- ErrNoSuchCtr = image.ErrNoSuchCtr
+ ErrNoSuchCtr = errors.New("no such container")
// ErrNoSuchPod indicates the requested pod does not exist
- ErrNoSuchPod = image.ErrNoSuchPod
+ ErrNoSuchPod = errors.New("no such pod")
// ErrNoSuchImage indicates the requested image does not exist
- ErrNoSuchImage = image.ErrNoSuchImage
+ ErrNoSuchImage = errors.New("no such image")
// ErrNoSuchTag indicates the requested image tag does not exist
- ErrNoSuchTag = image.ErrNoSuchTag
+ ErrNoSuchTag = errors.New("no such tag")
// ErrNoSuchVolume indicates the requested volume does not exist
ErrNoSuchVolume = errors.New("no such volume")
+ // ErrNoSuchNetwork indicates the requested network does not exist
+ ErrNoSuchNetwork = errors.New("network not found")
+
// ErrNoSuchExecSession indicates that the requested exec session does
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
@@ -76,7 +76,7 @@ var (
// ErrDetach indicates that an attach session was manually detached by
// the user.
- ErrDetach = utils.ErrDetach
+ ErrDetach = errors.New("detached from container")
// ErrWillDeadlock indicates that the requested operation will cause a
// deadlock. This is usually caused by upgrade issues, and is resolved
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 7c2a3e0f2..dc55dbc77 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -4,7 +4,6 @@ package events
import (
"context"
- "fmt"
"strconv"
"time"
@@ -50,7 +49,7 @@ func (e EventJournalD) Write(ee Event) error {
case Volume:
m["PODMAN_NAME"] = ee.Name
}
- return journal.Send(fmt.Sprintf("%s", ee.ToHumanReadable()), journal.PriInfo, m)
+ return journal.Send(string(ee.ToHumanReadable()), journal.PriInfo, m)
}
// Read reads events from the journal and sends qualified events to the event channel
diff --git a/libpod/image/errors.go b/libpod/image/errors.go
index ddbf7be4b..3f58b1c6a 100644
--- a/libpod/image/errors.go
+++ b/libpod/image/errors.go
@@ -1,17 +1,16 @@
package image
import (
- "errors"
+ "github.com/containers/podman/v2/libpod/define"
)
-// Copied directly from libpod errors to avoid circular imports
var (
// ErrNoSuchCtr indicates the requested container does not exist
- ErrNoSuchCtr = errors.New("no such container")
+ ErrNoSuchCtr = define.ErrNoSuchCtr
// ErrNoSuchPod indicates the requested pod does not exist
- ErrNoSuchPod = errors.New("no such pod")
+ ErrNoSuchPod = define.ErrNoSuchPod
// ErrNoSuchImage indicates the requested image does not exist
- ErrNoSuchImage = errors.New("no such image")
+ ErrNoSuchImage = define.ErrNoSuchImage
// ErrNoSuchTag indicates the requested image tag does not exist
- ErrNoSuchTag = errors.New("no such tag")
+ ErrNoSuchTag = define.ErrNoSuchTag
)
diff --git a/libpod/image/filters.go b/libpod/image/filters.go
index 9738a7d5e..db647954f 100644
--- a/libpod/image/filters.go
+++ b/libpod/image/filters.go
@@ -29,6 +29,26 @@ func CreatedBeforeFilter(createTime time.Time) ResultFilter {
}
}
+// IntermediateFilter returns a filter that excludes intermediate images, that
+// is, untagged images with children.
+func (ir *Runtime) IntermediateFilter(ctx context.Context, images []*Image) (ResultFilter, error) {
+ tree, err := ir.layerTree()
+ if err != nil {
+ return nil, err
+ }
+ return func(i *Image) bool {
+ if len(i.Names()) > 0 {
+ return true
+ }
+ children, err := tree.children(ctx, i, false)
+ if err != nil {
+ logrus.Error(err.Error())
+ return false
+ }
+ return len(children) == 0
+ }, nil
+}
+
// CreatedAfterFilter allows you to filter on images created after
// the given time.Time
func CreatedAfterFilter(createTime time.Time) ResultFilter {
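As a usage note, the new filter is meant to be applied through the existing FilterImages helper, mirroring the list and API callers later in this patch; a hedged fragment of that wiring:

    // Sketch: hide intermediate images unless --all was requested.
    if !opts.All {
    	filter, err := ir.IntermediateFilter(ctx, images)
    	if err != nil {
    		return nil, err
    	}
    	images = image.FilterImages(images, []image.ResultFilter{filter})
    }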
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 8b2aa318f..6106084d5 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -14,6 +14,7 @@ import (
"syscall"
"time"
+ "github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
dockerarchive "github.com/containers/image/v5/docker/archive"
@@ -75,6 +76,8 @@ type InfoImage struct {
Layers []LayerInfo
}
+const maxRetry = 3
+
// ImageFilter is a function to determine whether a image is included
// in command output. Images to be outputted are tested using the function.
// A true return will include the image, a false return will exclude it.
@@ -158,7 +161,7 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, label)
+ imageName, err := ir.pullImageFromHeuristicSource(ctx, name, writer, authfile, signaturePolicyPath, signingoptions, dockeroptions, &retry.RetryOptions{MaxRetry: maxRetry}, label)
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", name)
}
@@ -176,7 +179,7 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
- imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{})
+ imageNames, err := ir.pullImageFromReference(ctx, srcRef, writer, "", signaturePolicyPath, SigningOptions{}, &DockerRegistryOptions{}, &retry.RetryOptions{MaxRetry: maxRetry})
if err != nil {
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
@@ -856,26 +859,6 @@ func (i *Image) Dangling() bool {
return len(i.Names()) == 0
}
-// Intermediate returns true if the image is cache or intermediate image.
-// Cache image has parent and child.
-func (i *Image) Intermediate(ctx context.Context) (bool, error) {
- parent, err := i.IsParent(ctx)
- if err != nil {
- return false, err
- }
- if !parent {
- return false, nil
- }
- img, err := i.GetParent(ctx)
- if err != nil {
- return false, err
- }
- if img != nil {
- return true, nil
- }
- return false, nil
-}
-
// User returns the image's user
func (i *Image) User(ctx context.Context) (string, error) {
imgInspect, err := i.inspect(ctx, false)
@@ -1214,7 +1197,7 @@ func splitString(input string) string {
// the parent of any other layer in store. Double check that image with that
// layer exists as well.
func (i *Image) IsParent(ctx context.Context) (bool, error) {
- children, err := i.getChildren(ctx, 1)
+ children, err := i.getChildren(ctx, false)
if err != nil {
if errors.Cause(err) == ErrImageIsBareList {
return false, nil
@@ -1289,63 +1272,16 @@ func areParentAndChild(parent, child *imgspecv1.Image) bool {
// GetParent returns the image ID of the parent. Return nil if a parent is not found.
func (i *Image) GetParent(ctx context.Context) (*Image, error) {
- var childLayer *storage.Layer
- images, err := i.imageruntime.GetImages()
- if err != nil {
- return nil, err
- }
- if i.TopLayer() != "" {
- if childLayer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil {
- return nil, err
- }
- }
- // fetch the configuration for the child image
- child, err := i.ociv1Image(ctx)
+ tree, err := i.imageruntime.layerTree()
if err != nil {
- if errors.Cause(err) == ErrImageIsBareList {
- return nil, nil
- }
return nil, err
}
- for _, img := range images {
- if img.ID() == i.ID() {
- continue
- }
- candidateLayer := img.TopLayer()
- // as a child, our top layer, if we have one, is either the
- // candidate parent's layer, or one that's derived from it, so
- // skip over any candidate image where we know that isn't the
- // case
- if childLayer != nil {
- // The child has at least one layer, so a parent would
- // have a top layer that's either the same as the child's
- // top layer or the top layer's recorded parent layer,
- // which could be an empty value.
- if candidateLayer != childLayer.Parent && candidateLayer != childLayer.ID {
- continue
- }
- } else {
- // The child has no layers, but the candidate does.
- if candidateLayer != "" {
- continue
- }
- }
- // fetch the configuration for the candidate image
- candidate, err := img.ociv1Image(ctx)
- if err != nil {
- return nil, err
- }
- // compare them
- if areParentAndChild(candidate, child) {
- return img, nil
- }
- }
- return nil, nil
+ return tree.parent(ctx, i)
}
// GetChildren returns a list of the imageIDs that depend on the image
func (i *Image) GetChildren(ctx context.Context) ([]string, error) {
- children, err := i.getChildren(ctx, 0)
+ children, err := i.getChildren(ctx, true)
if err != nil {
if errors.Cause(err) == ErrImageIsBareList {
return nil, nil
@@ -1355,62 +1291,15 @@ func (i *Image) GetChildren(ctx context.Context) ([]string, error) {
return children, nil
}
-// getChildren returns a list of at most "max" imageIDs that depend on the image
-func (i *Image) getChildren(ctx context.Context, max int) ([]string, error) {
- var children []string
-
- if _, err := i.toImageRef(ctx); err != nil {
- return nil, nil
- }
-
- images, err := i.imageruntime.GetImages()
- if err != nil {
- return nil, err
- }
-
- // fetch the configuration for the parent image
- parent, err := i.ociv1Image(ctx)
+// getChildren returns a list of imageIDs that depend on the image. If all is
+// false, only the first child image is returned.
+func (i *Image) getChildren(ctx context.Context, all bool) ([]string, error) {
+ tree, err := i.imageruntime.layerTree()
if err != nil {
return nil, err
}
- parentLayer := i.TopLayer()
- for _, img := range images {
- if img.ID() == i.ID() {
- continue
- }
- if img.TopLayer() == "" {
- if parentLayer != "" {
- // this image has no layers, but we do, so
- // it can't be derived from this one
- continue
- }
- } else {
- candidateLayer, err := img.Layer()
- if err != nil {
- return nil, err
- }
- // if this image's top layer is not our top layer, and is not
- // based on our top layer, we can skip it
- if candidateLayer.Parent != parentLayer && candidateLayer.ID != parentLayer {
- continue
- }
- }
- // fetch the configuration for the candidate image
- candidate, err := img.ociv1Image(ctx)
- if err != nil {
- return nil, err
- }
- // compare them
- if areParentAndChild(parent, candidate) {
- children = append(children, img.ID())
- }
- // if we're not building an exhaustive list, maybe we're done?
- if max > 0 && len(children) >= max {
- break
- }
- }
- return children, nil
+ return tree.children(ctx, i, all)
}
// InputIsID returns a bool if the user input for an image
@@ -1667,6 +1556,7 @@ type LayerInfo struct {
// GetLayersMapWithImageInfo returns map of image-layers, with associated information like RepoTags, parent and list of child layers.
func GetLayersMapWithImageInfo(imageruntime *Runtime) (map[string]*LayerInfo, error) {
+ // TODO: evaluate if we can reuse `layerTree` here.
// Memory allocated to store map of layers with key LayerID.
// Map will build dependency chain with ParentID and ChildID(s)
diff --git a/libpod/image/layer_tree.go b/libpod/image/layer_tree.go
new file mode 100644
index 000000000..3699655fd
--- /dev/null
+++ b/libpod/image/layer_tree.go
@@ -0,0 +1,222 @@
+package image
+
+import (
+ "context"
+
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// layerTree is an internal representation of local layers.
+type layerTree struct {
+ // nodes is the actual layer tree with layer IDs being keys.
+ nodes map[string]*layerNode
+ // ociCache is a cache for Image.ID -> OCI Image. Translations are done
+ // on-demand.
+ ociCache map[string]*ociv1.Image
+}
+
+// node returns a layerNode for the specified layerID.
+func (t *layerTree) node(layerID string) *layerNode {
+ node, exists := t.nodes[layerID]
+ if !exists {
+ node = &layerNode{}
+ t.nodes[layerID] = node
+ }
+ return node
+}
+
+// toOCI returns an OCI image for the specified image.
+func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
+ var err error
+ oci, exists := t.ociCache[i.ID()]
+ if !exists {
+ oci, err = i.ociv1Image(ctx)
+ // Cache only successful conversions so an error is not sticky.
+ if err == nil {
+ t.ociCache[i.ID()] = oci
+ }
+ }
+ return oci, err
+}
+
+// layerNode is a node in a layerTree. Its ID is the key in a layerTree.
+type layerNode struct {
+ children []*layerNode
+ images []*Image
+ parent *layerNode
+}
+
+// layerTree extracts a layerTree from the layers in the local storage and
+// relates them to the specified images.
+func (ir *Runtime) layerTree() (*layerTree, error) {
+ layers, err := ir.store.Layers()
+ if err != nil {
+ return nil, err
+ }
+
+ images, err := ir.GetImages()
+ if err != nil {
+ return nil, err
+ }
+
+ tree := layerTree{
+ nodes: make(map[string]*layerNode),
+ ociCache: make(map[string]*ociv1.Image),
+ }
+
+ // First build a tree purely based on layer information.
+ for _, layer := range layers {
+ node := tree.node(layer.ID)
+ if layer.Parent == "" {
+ continue
+ }
+ parent := tree.node(layer.Parent)
+ node.parent = parent
+ parent.children = append(parent.children, node)
+ }
+
+ // Now assign the images to each (top) layer.
+ for i := range images {
+ img := images[i] // do not leak loop variable outside the scope
+ topLayer := img.TopLayer()
+ if topLayer == "" {
+ continue
+ }
+ node, exists := tree.nodes[topLayer]
+ if !exists {
+ return nil, errors.Errorf("top layer %s of image %s not found in layer tree", img.TopLayer(), img.ID())
+ }
+ node.images = append(node.images, img)
+ }
+
+ return &tree, nil
+}
+
+// children returns the image IDs of children. Child images are images with
+// either the same top layer as parent or parent being the true parent layer.
+// Furthermore, the history of the parent and child images must match, with
+// the parent having one history item less.
+// If all is true, all child images are returned. Otherwise, only the first
+// child image is returned.
+func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]string, error) {
+ if parent.TopLayer() == "" {
+ return nil, nil
+ }
+
+ var children []string
+
+ parentNode, exists := t.nodes[parent.TopLayer()]
+ if !exists {
+ return nil, errors.Errorf("layer not found in layer tree: %q", parent.TopLayer())
+ }
+
+ parentID := parent.ID()
+ parentOCI, err := t.toOCI(ctx, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ // checkParent returns true if child is a child image of parent.
+ checkParent := func(child *Image) (bool, error) {
+ if parentID == child.ID() {
+ return false, nil
+ }
+ childOCI, err := t.toOCI(ctx, child)
+ if err != nil {
+ return false, err
+ }
+ // History check.
+ return areParentAndChild(parentOCI, childOCI), nil
+ }
+
+ // addChildrenFromNode adds child images of parent found in node to
+ // children. Returns true if any image is a child of parent.
+ addChildrenFromNode := func(node *layerNode) (bool, error) {
+ foundChildren := false
+ for _, childImage := range node.images {
+ isChild, err := checkParent(childImage)
+ if err != nil {
+ return foundChildren, err
+ }
+ if isChild {
+ foundChildren = true
+ children = append(children, childImage.ID())
+ if !all {
+ return foundChildren, nil
+ }
+ }
+ }
+ return foundChildren, nil
+ }
+
+ // First check images where parent's top layer is also the parent
+ // layer.
+ for _, childNode := range parentNode.children {
+ found, err := addChildrenFromNode(childNode)
+ if err != nil {
+ return nil, err
+ }
+ if found && !all {
+ return children, nil
+ }
+ }
+
+ // Now check images with the same top layer.
+ if _, err := addChildrenFromNode(parentNode); err != nil {
+ return nil, err
+ }
+
+ return children, nil
+}
+
+// parent returns the parent image or nil if no parent image could be found.
+func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
+ if child.TopLayer() == "" {
+ return nil, nil
+ }
+
+ node, exists := t.nodes[child.TopLayer()]
+ if !exists {
+ return nil, errors.Errorf("layer not found in layer tree: %q", child.TopLayer())
+ }
+
+ childOCI, err := t.toOCI(ctx, child)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check images from the parent node (i.e., parent layer) and images
+ // with the same layer (i.e., same top layer).
+ childID := child.ID()
+ images := node.images
+ if node.parent != nil {
+ images = append(images, node.parent.images...)
+ }
+ for _, parent := range images {
+ if parent.ID() == childID {
+ continue
+ }
+ parentOCI, err := t.toOCI(ctx, parent)
+ if err != nil {
+ return nil, err
+ }
+ // History check.
+ if areParentAndChild(parentOCI, childOCI) {
+ return parent, nil
+ }
+ }
+
+ return nil, nil
+}
+
+// hasChildrenAndParent returns true if the specified image has children and a
+// parent.
+func (t *layerTree) hasChildrenAndParent(ctx context.Context, i *Image) (bool, error) {
+ children, err := t.children(ctx, i, false)
+ if err != nil {
+ return false, err
+ }
+ if len(children) == 0 {
+ return false, nil
+ }
+ parent, err := t.parent(ctx, i)
+ return parent != nil, err
+}
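A rough usage sketch of the new tree, assuming a Runtime ir and an *Image img are already in hand; the tree is built once and then queried, which is how the rewritten GetParent and getChildren wrappers use it:

    tree, err := ir.layerTree()
    if err != nil {
    	return err
    }
    // With all=false the lookup stops at the first child, which is enough
    // to answer "is this image a parent?".
    firstChild, err := tree.children(ctx, img, false)
    if err != nil {
    	return err
    }
    // parent returns nil (not an error) when the image has no parent.
    parentImg, err := tree.parent(ctx, img)
    if err != nil {
    	return err
    }
    _, _ = firstChild, parentImg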
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 8c9267650..5a9ca5d8e 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -66,6 +66,12 @@ func (ir *Runtime) GetPruneImages(ctx context.Context, all bool, filterFuncs []I
if err != nil {
return nil, err
}
+
+ tree, err := ir.layerTree()
+ if err != nil {
+ return nil, err
+ }
+
for _, i := range allImages {
// filter the images based on this.
for _, filterFunc := range filterFuncs {
@@ -85,8 +91,9 @@ func (ir *Runtime) GetPruneImages(ctx context.Context, all bool, filterFuncs []I
}
}
- //skip the cache or intermediate images
- intermediate, err := i.Intermediate(ctx)
+ // skip the cache (i.e., with parent) and intermediate (i.e.,
+ // with children) images
+ intermediate, err := tree.hasChildrenAndParent(ctx, i)
if err != nil {
return nil, err
}
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index d31f0dbdc..641698d03 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -7,6 +7,7 @@ import (
"path/filepath"
"strings"
+ "github.com/containers/common/pkg/retry"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
@@ -218,7 +219,7 @@ func toLocalImageName(imageName string) string {
// pullImageFromHeuristicSource pulls an image based on inputName, which is heuristically parsed and may involve configured registries.
// Use pullImageFromReference if the source is known precisely.
-func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName string, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromHeuristicSource")
defer span.Finish()
@@ -247,11 +248,11 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
return nil, errors.Wrapf(err, "error determining pull goal for image %q", inputName)
}
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, label)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, label)
}
// pullImageFromReference pulls an image from a types.imageReference.
-func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions) ([]string, error) {
+func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.ImageReference, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "pullImageFromReference")
defer span.Finish()
@@ -264,7 +265,7 @@ func (ir *Runtime) pullImageFromReference(ctx context.Context, srcRef types.Imag
if err != nil {
return nil, errors.Wrapf(err, "error determining pull goal for image %q", transports.ImageName(srcRef))
}
- return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, nil)
+ return ir.doPullImage(ctx, sc, *goal, writer, signingOptions, dockerOptions, retryOptions, nil)
}
func cleanErrorMessage(err error) string {
@@ -274,7 +275,7 @@ func cleanErrorMessage(err error) string {
}
// doPullImage is an internal helper interpreting pullGoal. Almost everyone should call one of the callers of doPullImage instead.
-func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, label *string) ([]string, error) {
+func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goal pullGoal, writer io.Writer, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, retryOptions *retry.RetryOptions, label *string) ([]string, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "doPullImage")
defer span.Finish()
@@ -310,9 +311,11 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
return nil, err
}
}
-
- _, err = cp.Image(ctx, policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions)
- if err != nil {
+ imageInfo := imageInfo // capture the loop variable for the retry closure
+ if err = retry.RetryIfNecessary(ctx, func() error {
+ _, err = cp.Image(ctx, policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions)
+ return err
+ }, retryOptions); err != nil {
pullErrors = multierror.Append(pullErrors, err)
logrus.Debugf("Error pulling image ref %s: %v", imageInfo.srcRef.StringWithinTransport(), err)
if writer != nil {
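The retry plumbing comes from containers/common/pkg/retry; a small, self-contained sketch of the call shape, with a stand-in operation instead of the real image copy:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containers/common/pkg/retry"
    )

    func main() {
    	// RetryIfNecessary re-runs the closure up to MaxRetry times, but only
    	// for errors it considers transient (e.g. network timeouts); other
    	// errors are returned immediately.
    	err := retry.RetryIfNecessary(context.Background(), func() error {
    		fmt.Println("attempting pull ...")
    		return nil // the real caller performs cp.Image() here
    	}, &retry.RetryOptions{MaxRetry: 3})
    	fmt.Println("result:", err)
    }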
diff --git a/libpod/logs/log.go b/libpod/logs/log.go
index c2545e188..a9554088b 100644
--- a/libpod/logs/log.go
+++ b/libpod/logs/log.go
@@ -101,11 +101,14 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
if err != nil {
if errors.Cause(err) == io.EOF {
inputs <- []string{leftover}
- close(inputs)
- break
+ } else {
+ logrus.Error(err)
}
- logrus.Error(err)
close(inputs)
+ if err := f.Close(); err != nil {
+ logrus.Error(err)
+ }
+ break
}
line := strings.Split(s+leftover, "\n")
if len(line) > 1 {
@@ -136,9 +139,6 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
}
// if we have enough loglines, we can hangup
if nllCounter >= tail {
- if err := f.Close(); err != nil {
- logrus.Error(err)
- }
break
}
}
diff --git a/libpod/logs/reversereader/reversereader.go b/libpod/logs/reversereader/reversereader.go
index 72d9ad975..4fa1a3f88 100644
--- a/libpod/logs/reversereader/reversereader.go
+++ b/libpod/logs/reversereader/reversereader.go
@@ -60,7 +60,7 @@ func (r *ReverseReader) Read() (string, error) {
if int64(n) < r.readSize {
b = b[0:n]
}
- // Set to the next page boundary
- r.offset = -r.readSize
+ // Move the offset up by one page size
+ r.offset -= r.readSize
return string(b), nil
}
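For context, ReverseReader walks a file backwards one page at a time, which is what getTailLog relies on; a hedged sketch, assuming the package's NewReverseReader constructor and a hypothetical log path:

    package main

    import (
    	"fmt"
    	"io"
    	"os"

    	"github.com/containers/podman/v2/libpod/logs/reversereader"
    	"github.com/pkg/errors"
    )

    // tailPages prints a file page by page, starting from the end.
    func tailPages(path string) error {
    	f, err := os.Open(path)
    	if err != nil {
    		return err
    	}
    	defer f.Close()
    	rr, err := reversereader.NewReverseReader(f)
    	if err != nil {
    		return err
    	}
    	for {
    		page, err := rr.Read()
    		if err != nil {
    			if errors.Cause(err) == io.EOF {
    				return nil
    			}
    			return err
    		}
    		fmt.Print(page)
    	}
    }

    func main() {
    	_ = tailPages("/tmp/example.log") // hypothetical path
    }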
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 844748970..ed8f82c46 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -171,6 +171,8 @@ type slirpFeatures struct {
HasMTU bool
HasEnableSandbox bool
HasEnableSeccomp bool
+ HasOutboundAddr bool
+ HasIPv6 bool
}
type slirp4netnsCmdArg struct {
@@ -197,6 +199,8 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
HasMTU: strings.Contains(string(out), "--mtu"),
HasEnableSandbox: strings.Contains(string(out), "--enable-sandbox"),
HasEnableSeccomp: strings.Contains(string(out), "--enable-seccomp"),
+ HasOutboundAddr: strings.Contains(string(out), "--outbound-addr"),
+ HasIPv6: strings.Contains(string(out), "--enable-ipv6"),
}, nil
}
@@ -225,21 +229,64 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
isSlirpHostForward := false
disableHostLoopback := true
+ enableIPv6 := false
+ outboundAddr := ""
+ outboundAddr6 := ""
+
if ctr.config.NetworkOptions != nil {
slirpOptions := ctr.config.NetworkOptions["slirp4netns"]
for _, o := range slirpOptions {
- switch o {
- case "port_handler=slirp4netns":
- isSlirpHostForward = true
- case "port_handler=rootlesskit":
- isSlirpHostForward = false
- case "allow_host_loopback=true":
- disableHostLoopback = false
- case "allow_host_loopback=false":
- disableHostLoopback = true
+ parts := strings.SplitN(o, "=", 2)
+ if len(parts) < 2 {
+ return errors.Errorf("unknown option for slirp4netns: %q", o)
+ }
+ option, value := parts[0], parts[1]
+
+ switch option {
+ case "port_handler":
+ switch value {
+ case "slirp4netns":
+ isSlirpHostForward = true
+ case "rootlesskit":
+ isSlirpHostForward = false
+ default:
+ return errors.Errorf("unknown port_handler for slirp4netns: %q", value)
+ }
+ case "allow_host_loopback":
+ switch value {
+ case "true":
+ disableHostLoopback = false
+ case "false":
+ disableHostLoopback = true
+ default:
+ return errors.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
+ }
+ case "enable_ipv6":
+ switch value {
+ case "true":
+ enableIPv6 = true
+ case "false":
+ enableIPv6 = false
+ default:
+ return errors.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
+ }
+ case "outbound_addr":
+ ipv4 := net.ParseIP(value)
+ if ipv4 == nil || ipv4.To4() == nil {
+ _, err := net.InterfaceByName(value)
+ if err != nil {
+ return errors.Errorf("invalid outbound_addr %q", value)
+ }
+ }
+ outboundAddr = value
+ case "outbound_addr6":
+ ipv6 := net.ParseIP(value)
+ if ipv6 == nil || ipv6.To4() != nil {
+ _, err := net.InterfaceByName(value)
+ if err != nil {
+ return errors.Errorf("invalid outbound_addr6: %q", value)
+ }
+ }
+ outboundAddr6 = value
default:
return errors.Errorf("unknown option for slirp4netns: %q", o)
-
}
}
}
@@ -262,6 +309,30 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
cmdArgs = append(cmdArgs, "--enable-seccomp")
}
+ if enableIPv6 {
+ if !slirpFeatures.HasIPv6 {
+ return errors.Errorf("enable_ipv6 not supported")
+ }
+ cmdArgs = append(cmdArgs, "--enable-ipv6")
+ }
+
+ if outboundAddr != "" {
+ if !slirpFeatures.HasOutboundAddr {
+ return errors.Errorf("outbound_addr not supported")
+ }
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", outboundAddr))
+ }
+
+ if outboundAddr6 != "" {
+ if !slirpFeatures.HasOutboundAddr || !slirpFeatures.HasIPv6 {
+ return errors.Errorf("outbound_addr6 not supported")
+ }
+ if !enableIPv6 {
+ return errors.Errorf("enable_ipv6=true is required for outbound_addr6")
+ }
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", outboundAddr6))
+ }
+
var apiSocket string
if havePortMapping && isSlirpHostForward {
apiSocket = filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("%s.net", ctr.config.ID))
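The outbound_addr values are accepted either as IP literals or as names of existing interfaces; a standalone sketch of that validation rule (sample values are made up):

    package main

    import (
    	"fmt"
    	"net"
    )

    // validOutboundAddr4 mirrors the check above: accept an IPv4 literal or,
    // failing that, the name of an existing network interface.
    func validOutboundAddr4(value string) bool {
    	if ip := net.ParseIP(value); ip != nil && ip.To4() != nil {
    		return true
    	}
    	_, err := net.InterfaceByName(value)
    	return err == nil
    }

    func main() {
    	for _, v := range []string{"10.0.2.2", "eth0", "not-an-addr"} {
    		fmt.Println(v, validOutboundAddr4(v))
    	}
    }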
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index f8d87759a..cfe3745fa 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -449,9 +449,12 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
return nil, nil, err
}
+ var filesToClose []*os.File
if options.PreserveFDs > 0 {
for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
+ f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))
+ filesToClose = append(filesToClose, f)
+ execCmd.ExtraFiles = append(execCmd.ExtraFiles, f)
}
}
@@ -483,14 +486,10 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
return nil, nil, err
}
- if options.PreserveFDs > 0 {
- for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
- // These fds were passed down to the runtime. Close them
- // and not interfere
- if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
- logrus.Debugf("unable to close file fd-%d", fd)
- }
- }
+ // These fds were passed down to the runtime; close our duplicates
+ // so we do not interfere with them.
+ for _, f := range filesToClose {
+ errorhandling.CloseQuiet(f)
}
return execCmd, pipes, nil
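The same pattern in isolation: wrap the inherited descriptors once, hand them to the child process, then close the wrappers so they do not linger. A hedged sketch (it assumes fds 3 and 4 really were inherited by this process):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/containers/podman/v2/pkg/errorhandling"
    )

    func main() {
    	preserveFDs := 2
    	var filesToClose []*os.File
    	for fd := 3; fd < 3+preserveFDs; fd++ {
    		filesToClose = append(filesToClose, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
    	}
    	// ... exec.Cmd.ExtraFiles would receive these files here ...
    	for _, f := range filesToClose {
    		errorhandling.CloseQuiet(f)
    	}
    }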
diff --git a/libpod/options.go b/libpod/options.go
index b98ef2221..16b05d9b6 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1451,6 +1451,19 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
}
}
+// WithCreateWorkingDir tells Podman to create the container's working directory
+// if it does not exist.
+func WithCreateWorkingDir() CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ ctr.config.CreateWorkingDir = true
+ return nil
+ }
+}
+
// Volume Creation Options
// WithVolumeName sets the name of the volume.
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index 98ed710a4..8eeb4f470 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,7 +1,7 @@
{
"url": "https://github.com/nixos/nixpkgs",
- "rev": "02591d02a910b3b92092153c5f3419a8d696aa1d",
- "date": "2020-07-09T03:52:28+02:00",
- "sha256": "1pp9v4rqmgx1b298gxix8b79m8pvxy1rcf8l25rxxxxnkr5ls1ng",
+ "rev": "b49e7987632e4c7ab3a093fdfc433e1826c4b9d7",
+ "date": "2020-07-26T09:18:52+02:00",
+ "sha256": "1mj6fy0p24izmasl653s5z4f2ka9v3b6mys45kjrqmkv889yk2r6",
"fetchSubmodules": false
}
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 3005063a7..9601f5e18 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/podman/v2/pkg/api/handlers/utils"
"github.com/containers/storage/pkg/archive"
"github.com/gorilla/schema"
+ "github.com/sirupsen/logrus"
)
func BuildImage(w http.ResponseWriter, r *http.Request) {
@@ -33,7 +34,13 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
}
if hdr, found := r.Header["Content-Type"]; found && len(hdr) > 0 {
- if hdr[0] != "application/x-tar" {
+ contentType := hdr[0]
+ switch contentType {
+ case "application/tar":
+ logrus.Warnf("tar file content type is %s, should use \"application/x-tar\" content type", contentType)
+ case "application/x-tar":
+ break
+ default:
utils.BadRequest(w, "Content-Type", hdr[0],
fmt.Errorf("Content-Type: %s is not supported. Should be \"application/x-tar\"", hdr[0]))
return
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 1e80cc91d..80b7505df 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -10,6 +10,7 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containers/podman/v2/libpod"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/api/handlers/utils"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/infra/abi"
@@ -44,9 +45,7 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
_, err = network.InspectNetwork(config, name)
if err != nil {
- // TODO our network package does not distinguish between not finding a
- // specific network vs not being able to read it
- utils.InternalServerError(w, err)
+ utils.NetworkNotFound(w, name, err)
return
}
report, err := getNetworkResourceByName(name, runtime)
@@ -285,7 +284,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
return
}
if !exists {
- utils.Error(w, "network not found", http.StatusNotFound, network.ErrNetworkNotFound)
+ utils.Error(w, "network not found", http.StatusNotFound, define.ErrNoSuchNetwork)
return
}
if err := network.RemoveNetwork(config, name); err != nil {
diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go
index 864775fe4..47ea6c40d 100644
--- a/pkg/api/handlers/libpod/containers.go
+++ b/pkg/api/handlers/libpod/containers.go
@@ -24,6 +24,7 @@ func ContainerExists(w http.ResponseWriter, r *http.Request) {
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
utils.ContainerNotFound(w, name, err)
+ return
}
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 3421f0836..51013acf1 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -594,11 +594,9 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
return
}
- // I know mitr hates this ... but doing for now
- if len(query.Repo) > 1 {
+ if len(query.Repo) > 0 {
destImage = fmt.Sprintf("%s:%s", query.Repo, tag)
}
-
commitImage, err := ctr.Commit(r.Context(), destImage, options)
if err != nil && !strings.Contains(err.Error(), "is not running") {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrapf(err, "CommitFailure"))
@@ -638,6 +636,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) {
query := struct {
Term string `json:"term"`
Limit int `json:"limit"`
+ NoTrunc bool `json:"noTrunc"`
Filters []string `json:"filters"`
TLSVerify bool `json:"tlsVerify"`
}{
@@ -650,7 +649,8 @@ func SearchImages(w http.ResponseWriter, r *http.Request) {
}
options := image.SearchOptions{
- Limit: query.Limit,
+ Limit: query.Limit,
+ NoTrunc: query.NoTrunc,
}
if _, found := r.URL.Query()["tlsVerify"]; found {
options.InsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
@@ -677,7 +677,7 @@ func SearchImages(w http.ResponseWriter, r *http.Request) {
for i := range searchResults {
reports[i].Index = searchResults[i].Index
reports[i].Name = searchResults[i].Name
- reports[i].Description = searchResults[i].Index
+ reports[i].Description = searchResults[i].Description
reports[i].Stars = searchResults[i].Stars
reports[i].Official = searchResults[i].Official
reports[i].Automated = searchResults[i].Automated
diff --git a/pkg/api/handlers/libpod/networks.go b/pkg/api/handlers/libpod/networks.go
index 9237a41ce..475522664 100644
--- a/pkg/api/handlers/libpod/networks.go
+++ b/pkg/api/handlers/libpod/networks.go
@@ -5,10 +5,10 @@ import (
"net/http"
"github.com/containers/podman/v2/libpod"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/api/handlers/utils"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/containers/podman/v2/pkg/domain/infra/abi"
- "github.com/containers/podman/v2/pkg/network"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
@@ -78,7 +78,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
if reports[0].Err != nil {
// If the network cannot be found, we return a 404.
- if errors.Cause(err) == network.ErrNetworkNotFound {
+ if errors.Cause(err) == define.ErrNoSuchNetwork {
utils.Error(w, "Something went wrong", http.StatusNotFound, err)
return
}
@@ -104,7 +104,7 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
reports, err := ic.NetworkInspect(r.Context(), []string{name}, options)
if err != nil {
// If the network cannot be found, we return a 404.
- if errors.Cause(err) == network.ErrNetworkNotFound {
+ if errors.Cause(err) == define.ErrNoSuchNetwork {
utils.Error(w, "Something went wrong", http.StatusNotFound, err)
return
}
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index 5a99529c6..bf9b18960 100644
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -39,6 +39,7 @@ func VolumeNotFound(w http.ResponseWriter, name string, err error) {
msg := fmt.Sprintf("No such volume: %s", name)
Error(w, msg, http.StatusNotFound, err)
}
+
func ContainerNotFound(w http.ResponseWriter, name string, err error) {
if errors.Cause(err) != define.ErrNoSuchCtr {
InternalServerError(w, err)
@@ -55,6 +56,14 @@ func ImageNotFound(w http.ResponseWriter, name string, err error) {
Error(w, msg, http.StatusNotFound, err)
}
+func NetworkNotFound(w http.ResponseWriter, name string, err error) {
+ if errors.Cause(err) != define.ErrNoSuchNetwork {
+ InternalServerError(w, err)
+ }
+ msg := fmt.Sprintf("No such network: %s", name)
+ Error(w, msg, http.StatusNotFound, err)
+}
+
func PodNotFound(w http.ResponseWriter, name string, err error) {
if errors.Cause(err) != define.ErrNoSuchPod {
InternalServerError(w, err)
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 28b1dda38..aad00c93b 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -102,20 +102,14 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
if query.All {
return images, nil
}
- returnImages := []*image.Image{}
- for _, img := range images {
- if len(img.Names()) == 0 {
- parent, err := img.IsParent(r.Context())
- if err != nil {
- return nil, err
- }
- if parent {
- continue
- }
- }
- returnImages = append(returnImages, img)
+
+ filter, err := runtime.ImageRuntime().IntermediateFilter(r.Context(), images)
+ if err != nil {
+ return nil, err
}
- return returnImages, nil
+ images = image.FilterImages(images, []image.ResultFilter{filter})
+
+ return images, nil
}
func GetImage(r *http.Request, name string) (*image.Image, error) {
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 7f060d098..cb4ce4fe7 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -972,6 +972,10 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// type: integer
// description: maximum number of results
// - in: query
+ // name: noTrunc
+ // type: boolean
+ // description: do not truncate any of the result strings
+ // - in: query
// name: filters
// type: string
// description: |
diff --git a/pkg/api/server/register_ping.go b/pkg/api/server/register_ping.go
index 4a8d2c768..4e299008c 100644
--- a/pkg/api/server/register_ping.go
+++ b/pkg/api/server/register_ping.go
@@ -9,9 +9,8 @@ import (
func (s *APIServer) registerPingHandlers(r *mux.Router) error {
- r.Handle("/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodGet)
- r.Handle("/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodHead)
-
+ r.Handle("/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodGet, http.MethodHead)
+ r.Handle(VersionedPath("/_ping"), s.APIHandler(compat.Ping)).Methods(http.MethodGet, http.MethodHead)
// swagger:operation GET /libpod/_ping libpod libpodPingGet
// ---
// summary: Ping service
@@ -62,7 +61,7 @@ func (s *APIServer) registerPingHandlers(r *mux.Router) error {
// determine if talking to Podman engine or another engine
// 500:
// $ref: "#/responses/InternalError"
- r.Handle("/libpod/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodGet)
- r.Handle("/libpod/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodHead)
+ r.Handle("/libpod/_ping", s.APIHandler(compat.Ping)).Methods(http.MethodGet, http.MethodHead)
+ r.Handle(VersionedPath("/libpod/_ping"), s.APIHandler(compat.Ping)).Methods(http.MethodGet, http.MethodHead)
return nil
}
diff --git a/pkg/api/server/register_volumes.go b/pkg/api/server/register_volumes.go
index b509a332a..8f7848ed4 100644
--- a/pkg/api/server/register_volumes.go
+++ b/pkg/api/server/register_volumes.go
@@ -128,7 +128,7 @@ func (s *APIServer) registerVolumeHandlers(r *mux.Router) error {
// The boolean `dangling` filter is not yet implemented for this endpoint.
// responses:
// '200':
- // "$ref": "#/responses/DockerVolumeList"
+ // "$ref": "#/responses/VolumeListResponse"
// '500':
// "$ref": "#/responses/InternalError"
r.Handle(VersionedPath("/volumes"), s.APIHandler(compat.ListVolumes)).Methods(http.MethodGet)
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index 9e6dbef19..9913b773b 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -98,7 +98,7 @@ func Remove(ctx context.Context, nameOrID string, force, volumes *bool) error {
params.Set("force", strconv.FormatBool(*force))
}
if volumes != nil {
- params.Set("vols", strconv.FormatBool(*volumes))
+ params.Set("v", strconv.FormatBool(*volumes))
}
response, err := conn.DoRequest(nil, http.MethodDelete, "/containers/%s", params, nil, nameOrID)
if err != nil {
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index fc8c9996e..12d1a9ce9 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -439,6 +439,7 @@ func Search(ctx context.Context, term string, opts entities.ImageSearchOptions)
params := url.Values{}
params.Set("term", term)
params.Set("limit", strconv.Itoa(opts.Limit))
+ params.Set("noTrunc", strconv.FormatBool(opts.NoTrunc))
for _, f := range opts.Filters {
params.Set("filters", f)
}
diff --git a/pkg/domain/entities/volumes.go b/pkg/domain/entities/volumes.go
index 2311d1f25..53d30ffdf 100644
--- a/pkg/domain/entities/volumes.go
+++ b/pkg/domain/entities/volumes.go
@@ -59,6 +59,42 @@ type VolumeConfigResponse struct {
Anonymous bool `json:"Anonymous"`
}
+// VolumeInfo Volume list response
+// swagger:model VolumeInfo
+type VolumeInfo struct {
+
+ // Date/Time the volume was created.
+ CreatedAt string `json:"CreatedAt,omitempty"`
+
+ // Name of the volume driver used by the volume. Only supports local driver
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // User-defined key/value metadata.
+ // Always included
+ Labels map[string]string `json:"Labels"`
+
+ // Mount path of the volume on the host.
+ // Required: true
+ Mountpoint string `json:"Mountpoint"`
+
+ // Name of the volume.
+ // Required: true
+ Name string `json:"Name"`
+
+ // The driver specific options used when creating the volume.
+ // Required: true
+ Options map[string]string `json:"Options"`
+
+ // The level at which the volume exists.
+ // Libpod does not implement volume scoping, and this is provided solely for
+ // Docker compatibility. The value is only "local".
+ // Required: true
+ Scope string `json:"Scope"`
+
+ // TODO: We don't include the volume `Status` for now
+}
+
type VolumeRmOptions struct {
All bool
Force bool
@@ -94,17 +130,25 @@ type VolumeListReport struct {
VolumeConfigResponse
}
-/*
- * Docker API compatibility types
- */
-// swagger:response DockerVolumeList
-type SwagDockerVolumeListResponse struct {
+// VolumeListBody Volume list response
+// swagger:model VolumeListBody
+type VolumeListBody struct {
+ Volumes []*VolumeInfo
+}
+
+// Volume list response
+// swagger:response VolumeListResponse
+type SwagVolumeListResponse struct {
// in:body
Body struct {
- docker_api_types_volume.VolumeListOKBody
+ VolumeListBody
}
}
+/*
+ * Docker API compatibility types
+ */
+
// swagger:model DockerVolumeCreate
type DockerVolumeCreate docker_api_types_volume.VolumeCreateBody
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 05adc40fe..35675e1f3 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"net/url"
"os"
+ "path"
"path/filepath"
"strconv"
"strings"
@@ -682,10 +683,6 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
}
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
- dockerRegistryOptions := image.DockerRegistryOptions{
- DockerCertPath: options.CertDir,
- }
-
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return nil, errors.Wrap(err, "error initializing GPG")
@@ -704,7 +701,6 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
}
for _, signimage := range names {
- var sigStoreDir string
srcRef, err := alltransports.ParseImageName(signimage)
if err != nil {
return nil, errors.Wrapf(err, "error parsing image name")
@@ -725,40 +721,38 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
if dockerReference == nil {
return nil, errors.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
}
-
- // create the signstore file
- rtc, err := ir.Libpod.GetConfig()
- if err != nil {
- return nil, err
- }
- newImage, err := ir.Libpod.ImageRuntime().New(ctx, signimage, rtc.Engine.SignaturePolicyPath, "", os.Stderr, &dockerRegistryOptions, image.SigningOptions{SignBy: options.SignBy}, nil, util.PullImageMissing)
- if err != nil {
- return nil, errors.Wrapf(err, "error pulling image %s", signimage)
+ var sigStoreDir string
+ if options.Directory != "" {
+ sigStoreDir = options.Directory
}
if sigStoreDir == "" {
if rootless.IsRootless() {
sigStoreDir = filepath.Join(filepath.Dir(ir.Libpod.StorageConfig().GraphRoot), "sigstore")
} else {
+ var sigStoreURI string
registryInfo := trust.HaveMatchRegistry(rawSource.Reference().DockerReference().String(), registryConfigs)
if registryInfo != nil {
- if sigStoreDir = registryInfo.SigStoreStaging; sigStoreDir == "" {
- sigStoreDir = registryInfo.SigStore
-
+ if sigStoreURI = registryInfo.SigStoreStaging; sigStoreURI == "" {
+ sigStoreURI = registryInfo.SigStore
}
}
+ if sigStoreURI == "" {
+ return nil, errors.Errorf("no signature storage configuration found for %s", rawSource.Reference().DockerReference().String())
+ }
+ sigStoreDir, err = localPathFromURI(sigStoreURI)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid signature storage %s", sigStoreURI)
+ }
}
}
- sigStoreDir, err = isValidSigStoreDir(sigStoreDir)
+ manifestDigest, err := manifest.Digest(getManifest)
if err != nil {
- return nil, errors.Wrapf(err, "invalid signature storage %s", sigStoreDir)
- }
- repos, err := newImage.RepoDigests()
- if err != nil {
- return nil, errors.Wrapf(err, "error calculating repo digests for %s", signimage)
+ return nil, err
}
- if len(repos) == 0 {
- logrus.Errorf("no repodigests associated with the image %s", signimage)
- continue
+ repo := reference.Path(dockerReference)
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dockerReference.String())
}
// create signature
@@ -766,22 +760,21 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
if err != nil {
return nil, errors.Wrapf(err, "error creating new signature")
}
-
- trimmedDigest := strings.TrimPrefix(repos[0], strings.Split(repos[0], "/")[0])
- sigStoreDir = filepath.Join(sigStoreDir, strings.Replace(trimmedDigest, ":", "=", 1))
- if err := os.MkdirAll(sigStoreDir, 0751); err != nil {
+ // create the signstore file
+ signatureDir := fmt.Sprintf("%s@%s=%s", filepath.Join(sigStoreDir, repo), manifestDigest.Algorithm(), manifestDigest.Hex())
+ if err := os.MkdirAll(signatureDir, 0751); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
- logrus.Errorf("error creating directory %s: %s", sigStoreDir, err)
+ logrus.Errorf("error creating directory %s: %s", signatureDir, err)
continue
}
}
- sigFilename, err := getSigFilename(sigStoreDir)
+ sigFilename, err := getSigFilename(signatureDir)
if err != nil {
logrus.Errorf("error creating sigstore file: %v", err)
continue
}
- err = ioutil.WriteFile(filepath.Join(sigStoreDir, sigFilename), newSig, 0644)
+ err = ioutil.WriteFile(filepath.Join(signatureDir, sigFilename), newSig, 0644)
if err != nil {
logrus.Errorf("error storing signature for %s", rawSource.Reference().DockerReference().String())
continue
@@ -809,14 +802,12 @@ func getSigFilename(sigStoreDirPath string) (string, error) {
}
}
-func isValidSigStoreDir(sigStoreDir string) (string, error) {
- writeURIs := map[string]bool{"file": true}
+func localPathFromURI(sigStoreDir string) (string, error) {
url, err := url.Parse(sigStoreDir)
if err != nil {
return sigStoreDir, errors.Wrapf(err, "invalid directory %s", sigStoreDir)
}
- _, exists := writeURIs[url.Scheme]
- if !exists {
+ if url.Scheme != "file" {
return sigStoreDir, errors.Errorf("writing to %s is not supported. Use a supported scheme", sigStoreDir)
}
sigStoreDir = url.Path
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
index 11e2ddb39..7ec84246d 100644
--- a/pkg/domain/infra/abi/images_list.go
+++ b/pkg/domain/infra/abi/images_list.go
@@ -13,6 +13,14 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
return nil, err
}
+ if !opts.All {
+ filter, err := ir.Libpod.ImageRuntime().IntermediateFilter(ctx, images)
+ if err != nil {
+ return nil, err
+ }
+ images = libpodImage.FilterImages(images, []libpodImage.ResultFilter{filter})
+ }
+
summaries := []*entities.ImageSummary{}
for _, img := range images {
var repoTags []string
@@ -32,15 +40,6 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
if err != nil {
return nil, err
}
- if len(img.Names()) == 0 {
- parent, err := img.IsParent(ctx)
- if err != nil {
- return nil, err
- }
- if parent {
- continue
- }
- }
}
digests := make([]string, len(img.Digests()))
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 1fad67b86..d2221ab7b 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -500,9 +500,6 @@ func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.C
}
func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) {
- if opts.Rm {
- logrus.Info("the remote client does not support --rm yet")
- }
con, err := containers.CreateWithSpec(ic.ClientCxt, opts.Spec)
if err != nil {
return nil, err
@@ -526,6 +523,17 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if err != nil {
report.ExitCode = define.ExitCode(err)
}
+ if opts.Rm {
+ if err := containers.Remove(ic.ClientCxt, con.ID, bindings.PFalse, bindings.PTrue); err != nil {
+ if errors.Cause(err) == define.ErrNoSuchCtr ||
+ errors.Cause(err) == define.ErrCtrRemoved {
+ logrus.Warnf("Container %s does not exist: %v", con.ID, err)
+ } else {
+ logrus.Errorf("Error removing container %s: %v", con.ID, err)
+ }
+ }
+ }
+
return &report, err
}
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 6845d01c0..c7bfdcd2b 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -196,7 +196,11 @@ func (ir *ImageEngine) Load(ctx context.Context, opts entities.ImageLoadOptions)
return nil, err
}
defer f.Close()
- return images.Load(ir.ClientCxt, f, &opts.Name)
+ ref := opts.Name
+ if len(opts.Tag) > 0 {
+ ref += ":" + opts.Tag
+ }
+ return images.Load(ir.ClientCxt, f, &ref)
}
func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOptions) (*entities.ImageImportReport, error) {
diff --git a/pkg/namespaces/namespaces.go b/pkg/namespaces/namespaces.go
index 7831af8f9..c35f68e02 100644
--- a/pkg/namespaces/namespaces.go
+++ b/pkg/namespaces/namespaces.go
@@ -91,7 +91,7 @@ func (n UsernsMode) IsHost() bool {
return n == hostType
}
-// IsKeepID indicates whether container uses a mapping where the (uid, gid) on the host is lept inside of the namespace.
+// IsKeepID indicates whether container uses a mapping where the (uid, gid) on the host is kept inside of the namespace.
func (n UsernsMode) IsKeepID() bool {
return n == "keep-id"
}
diff --git a/pkg/network/config.go b/pkg/network/config.go
index a504e0ad0..0115433e1 100644
--- a/pkg/network/config.go
+++ b/pkg/network/config.go
@@ -2,7 +2,6 @@ package network
import (
"encoding/json"
- "errors"
"net"
)
@@ -20,10 +19,6 @@ const (
DefaultPodmanDomainName = "dns.podman"
)
-var (
- ErrNetworkNotFound = errors.New("network not found")
-)
-
// GetDefaultPodmanNetwork outputs the default network for podman
func GetDefaultPodmanNetwork() (*net.IPNet, error) {
_, n, err := net.ParseCIDR("10.88.1.0/24")
diff --git a/pkg/network/files.go b/pkg/network/files.go
index beb3289f3..38ce38b97 100644
--- a/pkg/network/files.go
+++ b/pkg/network/files.go
@@ -10,6 +10,7 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
"github.com/containers/common/pkg/config"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/pkg/errors"
)
@@ -55,7 +56,7 @@ func GetCNIConfigPathByName(config *config.Config, name string) (string, error)
return confFile, nil
}
}
- return "", errors.Wrap(ErrNetworkNotFound, fmt.Sprintf("unable to find network configuration for %s", name))
+ return "", errors.Wrap(define.ErrNoSuchNetwork, fmt.Sprintf("unable to find network configuration for %s", name))
}
// ReadRawCNIConfByName reads the raw CNI configuration for a CNI
diff --git a/pkg/network/network.go b/pkg/network/network.go
index 6c84c8a8a..b24c72f5f 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -8,6 +8,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
"github.com/containers/common/pkg/config"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -200,7 +201,7 @@ func InspectNetwork(config *config.Config, name string) (map[string]interface{},
func Exists(config *config.Config, name string) (bool, error) {
_, err := ReadRawCNIConfByName(config, name)
if err != nil {
- if errors.Cause(err) == ErrNetworkNotFound {
+ if errors.Cause(err) == define.ErrNoSuchNetwork {
return false, nil
}
return false, err
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index eaf2d4551..2e1fddc48 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -205,7 +205,7 @@ can_use_shortcut ()
if (strcmp (argv[argc], "mount") == 0
|| strcmp (argv[argc], "search") == 0
- || strcmp (argv[argc], "system") == 0)
+ || (strcmp (argv[argc], "system") == 0 && argv[argc+1] && strcmp (argv[argc+1], "service") != 0))
{
ret = false;
break;
@@ -225,6 +225,16 @@ can_use_shortcut ()
return ret;
}
+int
+is_fd_inherited(int fd)
+{
+ if (open_files_set == NULL || fd > open_files_max_fd || fd < 0)
+ {
+ return 0;
+ }
+ return FD_ISSET(fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])) ? 1 : 0;
+}
+
static void __attribute__((constructor)) init()
{
const char *xdg_runtime_dir;
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index ccc8a1d94..c3f1fc7fa 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -32,6 +32,7 @@ extern uid_t rootless_gid();
extern int reexec_in_user_namespace(int ready, char *pause_pid_file_path, char *file_to_read, int fd);
extern int reexec_in_user_namespace_wait(int pid, int options);
extern int reexec_userns_join(int pid, char *pause_pid_file_path);
+extern int is_fd_inherited(int fd);
*/
import "C"
@@ -520,3 +521,8 @@ func ConfigurationMatches() (bool, error) {
return matches(GetRootlessGID(), gids, currentGIDs), nil
}
+
+// IsFdInherited checks whether the fd is open and valid for use
+func IsFdInherited(fd int) bool {
+ return int(C.is_fd_inherited(C.int(fd))) > 0
+}
diff --git a/pkg/rootless/rootless_unsupported.go b/pkg/rootless/rootless_unsupported.go
index 1499b737f..7dfb4a4b2 100644
--- a/pkg/rootless/rootless_unsupported.go
+++ b/pkg/rootless/rootless_unsupported.go
@@ -64,3 +64,8 @@ func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) {
func ReadMappingsProc(path string) ([]idtools.IDMap, error) {
return nil, nil
}
+
+// IsFdInherited checks whether the fd is open and valid for use
+func IsFdInherited(fd int) bool {
+ return false
+}
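A hedged fragment of how a caller can validate --preserve-fds with the new helper before any descriptors are passed along (the surrounding CLI wiring is assumed):

    for fd := 3; fd < 3+int(preserveFDs); fd++ {
    	if !rootless.IsFdInherited(fd) {
    		return errors.Errorf("file descriptor %d is not available", fd)
    	}
    }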
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 9dfb35be3..b61ac2c30 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -238,6 +238,17 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
if s.Entrypoint != nil {
options = append(options, libpod.WithEntrypoint(s.Entrypoint))
}
+ // If the user did not set a workdir but the image did, ensure it is
+ // created.
+ if s.WorkDir == "" && img != nil {
+ newWD, err := img.WorkingDir(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if newWD != "" {
+ options = append(options, libpod.WithCreateWorkingDir())
+ }
+ }
if s.StopSignal != nil {
options = append(options, libpod.WithStopSignal(*s.StopSignal))
}
diff --git a/pkg/specgen/generate/namespaces.go b/pkg/specgen/generate/namespaces.go
index b8ab1399e..7adb8be6a 100644
--- a/pkg/specgen/generate/namespaces.go
+++ b/pkg/specgen/generate/namespaces.go
@@ -462,6 +462,10 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
func GetNamespaceOptions(ns []string) ([]libpod.PodCreateOption, error) {
var options []libpod.PodCreateOption
var erroredOptions []libpod.PodCreateOption
+ if ns == nil {
+ // set the default namespaces
+ ns = strings.Split(specgen.DefaultKernelNamespaces, ",")
+ }
for _, toShare := range ns {
switch toShare {
case "cgroup":
diff --git a/pkg/trust/trust.go b/pkg/trust/trust.go
index 60de099fa..2348bc410 100644
--- a/pkg/trust/trust.go
+++ b/pkg/trust/trust.go
@@ -12,9 +12,9 @@ import (
"strings"
"github.com/containers/image/v5/types"
+ "github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "gopkg.in/yaml.v2"
)
// PolicyContent struct for policy.json file
@@ -157,7 +157,7 @@ func HaveMatchRegistry(key string, registryConfigs *RegistryConfiguration) *Regi
searchKey = searchKey[:strings.LastIndex(searchKey, "/")]
}
}
- return nil
+ return registryConfigs.DefaultDocker
}
// CreateTmpFile creates a temp file under dir and writes the content into it
diff --git a/test/apiv2/01-basic.at b/test/apiv2/01-basic.at
index 79dac990a..96b6aef7c 100644
--- a/test/apiv2/01-basic.at
+++ b/test/apiv2/01-basic.at
@@ -5,9 +5,15 @@
# NOTE: paths with a leading slash will be interpreted as-is;
# paths without will have '/v1.40/' prepended.
-t GET /_ping 200 OK
+t GET /_ping 200 OK
t HEAD /_ping 200
t GET /libpod/_ping 200 OK
+t HEAD /libpod/_ping 200
+
+t GET _ping 200 OK
+t HEAD _ping 200
+t GET libpod/_ping 200 OK
+t HEAD libpod/_ping 200
for i in /version version; do
t GET $i 200 \
diff --git a/test/apiv2/35-networks.at b/test/apiv2/35-networks.at
new file mode 100644
index 000000000..fff3f3b1f
--- /dev/null
+++ b/test/apiv2/35-networks.at
@@ -0,0 +1,8 @@
+# -*- sh -*-
+#
+# network-related tests
+#
+
+t GET /networks/non-existing-network 404
+
+# vim: filetype=sh
diff --git a/test/apiv2/rest_api/test_rest_v1_0_0.py b/test/apiv2/rest_api/test_rest_v1_0_0.py
index 7c53623cb..2e574e015 100644
--- a/test/apiv2/rest_api/test_rest_v1_0_0.py
+++ b/test/apiv2/rest_api/test_rest_v1_0_0.py
@@ -13,9 +13,11 @@ from multiprocessing import Process
import requests
from dateutil.parser import parse
+PODMAN_URL = "http://localhost:8080"
+
def _url(path):
- return "http://localhost:8080/v1.0.0/libpod" + path
+ return PODMAN_URL + "/v1.0.0/libpod" + path
def podman():
@@ -205,7 +207,21 @@ class TestApi(unittest.TestCase):
search.join(timeout=10)
self.assertFalse(search.is_alive(), "/images/search took too long")
- def validateObjectFields(self, buffer):
+ def test_ping(self):
+ r = requests.get(PODMAN_URL + "/_ping")
+ self.assertEqual(r.status_code, 200, r.text)
+
+ r = requests.head(PODMAN_URL + "/_ping")
+ self.assertEqual(r.status_code, 200, r.text)
+
+ r = requests.get(_url("/_ping"))
+ self.assertEqual(r.status_code, 200, r.text)
+
+ r = requests.head(_url("/_ping"))
+ self.assertEqual(r.status_code, 200, r.text)
+
+
+def validateObjectFields(self, buffer):
objs = json.loads(buffer)
if not isinstance(objs, dict):
for o in objs:
@@ -214,6 +230,5 @@ class TestApi(unittest.TestCase):
_ = objs["Id"]
return objs
-
if __name__ == '__main__':
unittest.main()
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index 568ee080d..c122ce50f 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -49,6 +49,21 @@ var _ = Describe("Podman commit", func() {
Expect(StringInSlice("foobar.com/test1-image:latest", data[0].RepoTags)).To(BeTrue())
})
+ It("podman commit single letter container", func() {
+ _, ec, _ := podmanTest.RunLsContainer("test1")
+ Expect(ec).To(Equal(0))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ session := podmanTest.Podman([]string{"commit", "test1", "a"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ check := podmanTest.Podman([]string{"inspect", "localhost/a:latest"})
+ check.WaitWithDefaultTimeout()
+ data := check.InspectImageJSON()
+ Expect(StringInSlice("localhost/a:latest", data[0].RepoTags)).To(BeTrue())
+ })
+
It("podman container commit container", func() {
_, ec, _ := podmanTest.RunLsContainer("test1")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index f5d15d3bd..055546f88 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -210,7 +210,6 @@ var _ = Describe("Podman exec", func() {
})
It("podman exec missing working directory test", func() {
- Skip(v2remotefail)
setup := podmanTest.RunTopContainer("test1")
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
@@ -225,7 +224,6 @@ var _ = Describe("Podman exec", func() {
})
It("podman exec cannot be invoked", func() {
- Skip(v2remotefail)
setup := podmanTest.RunTopContainer("test1")
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
@@ -236,7 +234,6 @@ var _ = Describe("Podman exec", func() {
})
It("podman exec command not found", func() {
- Skip(v2remotefail)
setup := podmanTest.RunTopContainer("test1")
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
diff --git a/test/e2e/image_sign_test.go b/test/e2e/image_sign_test.go
new file mode 100644
index 000000000..c54cf433d
--- /dev/null
+++ b/test/e2e/image_sign_test.go
@@ -0,0 +1,62 @@
+// +build !remote
+
+package integration
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ . "github.com/containers/podman/v2/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman image sign", func() {
+ var (
+ origGNUPGHOME string
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+
+ tempGNUPGHOME := filepath.Join(podmanTest.TempDir, "tmpGPG")
+ err := os.Mkdir(tempGNUPGHOME, os.ModePerm)
+ Expect(err).To(BeNil())
+
+ origGNUPGHOME = os.Getenv("GNUPGHOME")
+ err = os.Setenv("GNUPGHOME", tempGNUPGHOME)
+ Expect(err).To(BeNil())
+
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+ os.Setenv("GNUPGHOME", origGNUPGHOME)
+ })
+
+ It("podman sign image", func() {
+ cmd := exec.Command("gpg", "--import", "sign/secret-key.asc")
+ err := cmd.Run()
+ Expect(err).To(BeNil())
+ sigDir := filepath.Join(podmanTest.TempDir, "test-sign")
+ err = os.MkdirAll(sigDir, os.ModePerm)
+ Expect(err).To(BeNil())
+ session := podmanTest.Podman([]string{"image", "sign", "--directory", sigDir, "--sign-by", "foo@bar.com", "docker://library/alpine"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ _, err = os.Stat(filepath.Join(sigDir, "library"))
+ Expect(err).To(BeNil())
+ })
+})
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 381336b8b..e63bce3fe 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -72,16 +72,16 @@ var _ = Describe("Podman logs", func() {
Expect(len(results.OutputToStringArray())).To(Equal(0))
})
- It("tail 99 lines", func() {
- logc := podmanTest.Podman([]string{"run", "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
+ It("tail 800 lines", func() {
+ logc := podmanTest.Podman([]string{"run", "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"})
logc.WaitWithDefaultTimeout()
Expect(logc).To(Exit(0))
cid := logc.OutputToString()
- results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
+ results := podmanTest.Podman([]string{"logs", "--tail", "800", cid})
results.WaitWithDefaultTimeout()
Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
+ Expect(len(results.OutputToStringArray())).To(Equal(800))
})
It("tail 2 lines with timestamps", func() {
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 87b74052a..d735217d6 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -1,11 +1,14 @@
package integration
import (
+ "fmt"
"os"
+ "strings"
. "github.com/containers/podman/v2/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/uber/jaeger-client-go/utils"
)
var _ = Describe("Podman run networking", func() {
@@ -236,6 +239,18 @@ var _ = Describe("Podman run networking", func() {
Expect((hp1 == "4000" && hp2 == "8000") || (hp1 == "8000" && hp2 == "4000")).To(BeTrue())
})
+ It("podman run -p 0.0.0.0:8080:80", func() {
+ name := "testctr"
+ session := podmanTest.Podman([]string{"create", "-t", "-p", "0.0.0.0:8080:80", "--name", name, ALPINE, "/bin/sh"})
+ session.WaitWithDefaultTimeout()
+ inspectOut := podmanTest.InspectContainer(name)
+ Expect(len(inspectOut)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports)).To(Equal(1))
+ Expect(len(inspectOut[0].NetworkSettings.Ports["80/tcp"])).To(Equal(1))
+ Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostPort).To(Equal("8080"))
+ Expect(inspectOut[0].NetworkSettings.Ports["80/tcp"][0].HostIP).To(Equal(""))
+ })
+
It("podman run network expose host port 80 to container port 8000", func() {
SkipIfRootless()
session := podmanTest.Podman([]string{"run", "-dt", "-p", "80:8000", ALPINE, "/bin/sh"})
@@ -278,6 +293,53 @@ var _ = Describe("Podman run networking", func() {
Expect(session.ExitCode()).To(Equal(0))
})
+ It("podman run network bind to 127.0.0.1", func() {
+ slirp4netnsHelp := SystemExec("slirp4netns", []string{"--help"})
+ Expect(slirp4netnsHelp.ExitCode()).To(Equal(0))
+ networkConfiguration := "slirp4netns:outbound_addr=127.0.0.1,allow_host_loopback=true"
+
+ if strings.Contains(slirp4netnsHelp.OutputToString(), "outbound-addr") {
+ ncListener := StartSystemExec("nc", []string{"-v", "-n", "-l", "-p", "8083"})
+ session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, "-dt", ALPINE, "nc", "-w", "2", "10.0.2.2", "8083"})
+ session.Wait(30)
+ ncListener.Wait(30)
+
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(ncListener.ExitCode()).To(Equal(0))
+ Expect(ncListener.ErrorToString()).To(ContainSubstring("127.0.0.1"))
+ } else {
+ session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, "-dt", ALPINE, "nc", "-w", "2", "10.0.2.2", "8083"})
+ session.Wait(30)
+ Expect(session.ExitCode()).ToNot(Equal(0))
+ Expect(session.ErrorToString()).To(ContainSubstring("outbound_addr not supported"))
+ }
+ })
+
+ It("podman run network bind to HostIP", func() {
+ ip, err := utils.HostIP()
+ Expect(err).To(BeNil())
+
+ slirp4netnsHelp := SystemExec("slirp4netns", []string{"--help"})
+ Expect(slirp4netnsHelp.ExitCode()).To(Equal(0))
+ networkConfiguration := fmt.Sprintf("slirp4netns:outbound_addr=%s,allow_host_loopback=true", ip.String())
+
+ if strings.Contains(slirp4netnsHelp.OutputToString(), "outbound-addr") {
+ ncListener := StartSystemExec("nc", []string{"-v", "-n", "-l", "-p", "8084"})
+ session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, "-dt", ALPINE, "nc", "-w", "2", "10.0.2.2", "8084"})
+ session.Wait(30)
+ ncListener.Wait(30)
+
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(ncListener.ExitCode()).To(Equal(0))
+ Expect(ncListener.ErrorToString()).To(ContainSubstring(ip.String()))
+ } else {
+ session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, "-dt", ALPINE, "nc", "-w", "2", "10.0.2.2", "8084"})
+ session.Wait(30)
+ Expect(session.ExitCode()).ToNot(Equal(0))
+ Expect(session.ErrorToString()).To(ContainSubstring("outbound_addr not supported"))
+ }
+ })
+
It("podman run network expose ports in image metadata", func() {
session := podmanTest.Podman([]string{"create", "--name", "test", "-dt", "-P", nginx})
session.Wait(90)
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 1f9cc3cb0..9cb76d1f6 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -811,8 +811,20 @@ USER mail`
Expect(len(session.OutputToStringArray())).To(Equal(1))
})
+ It("podman run --mount type=devpts,target=/foo/bar", func() {
+ SkipIfRootless()
+ session := podmanTest.Podman([]string{"run", "--mount", "type=devpts,target=/foo/bar", fedoraMinimal, "stat", "-f", "-c%T", "/foo/bar"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("devpts"))
+ })
+
It("podman run --pod automatically", func() {
- session := podmanTest.Podman([]string{"run", "--pod", "new:foobar", ALPINE, "ls"})
+ session := podmanTest.Podman([]string{"run", "-d", "--pod", "new:foobar", ALPINE, "nc", "-l", "-p", "8080"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"run", "--pod", "foobar", ALPINE, "/bin/sh", "-c", "echo test | nc -w 1 127.0.0.1 8080"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -1051,6 +1063,13 @@ USER mail`
Expect(session.ExitCode()).To(Equal(0))
})
+ It("podman run --preserve-fds invalid fd", func() {
+ session := podmanTest.Podman([]string{"run", "--preserve-fds", "2", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ Expect(session.ErrorToString()).To(ContainSubstring("file descriptor 3 is not available"))
+ })
+
It("podman run --privileged and --group-add", func() {
groupName := "kvm"
session := podmanTest.Podman([]string{"run", "-t", "-i", "--group-add", groupName, "--privileged", fedoraMinimal, "groups"})
@@ -1123,4 +1142,16 @@ USER mail`
Expect(session.ExitCode()).To(Not(Equal(0)))
Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask"))
})
+
+ It("podman run makes entrypoint from image", func() {
+ // BuildImage does not seem to work remote
+ SkipIfRemote()
+ dockerfile := `FROM busybox
+WORKDIR /madethis`
+ podmanTest.BuildImage(dockerfile, "test", "false")
+ session := podmanTest.Podman([]string{"run", "--rm", "test", "pwd"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("/madethis"))
+ })
})
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index c729423a3..c4ee05af9 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -241,7 +241,7 @@ var _ = Describe("Podman run with volumes", func() {
Expect(mountCmd1.ExitCode()).To(Equal(0))
os.Stdout.Sync()
os.Stderr.Sync()
- mountOut1 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd1.Out.Contents())), " ")
+ mountOut1 := strings.Join(strings.Fields(string(mountCmd1.Out.Contents())), " ")
fmt.Printf("Output: %s", mountOut1)
Expect(strings.Contains(mountOut1, volName)).To(BeFalse())
@@ -257,7 +257,7 @@ var _ = Describe("Podman run with volumes", func() {
Expect(mountCmd2.ExitCode()).To(Equal(0))
os.Stdout.Sync()
os.Stderr.Sync()
- mountOut2 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd2.Out.Contents())), " ")
+ mountOut2 := strings.Join(strings.Fields(string(mountCmd2.Out.Contents())), " ")
fmt.Printf("Output: %s", mountOut2)
Expect(strings.Contains(mountOut2, volName)).To(BeTrue())
@@ -278,7 +278,7 @@ var _ = Describe("Podman run with volumes", func() {
Expect(mountCmd3.ExitCode()).To(Equal(0))
os.Stdout.Sync()
os.Stderr.Sync()
- mountOut3 := strings.Join(strings.Fields(fmt.Sprintf("%s", mountCmd3.Out.Contents())), " ")
+ mountOut3 := strings.Join(strings.Fields(string(mountCmd3.Out.Contents())), " ")
fmt.Printf("Output: %s", mountOut3)
Expect(strings.Contains(mountOut3, volName)).To(BeFalse())
})
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 1e7dff697..c6766fe2a 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io/ioutil"
"os"
+ "regexp"
"strconv"
"text/template"
@@ -98,6 +99,15 @@ registries = ['{{.Host}}:{{.Port}}']`
Expect(search.LineInOutputContains("quay.io/libpod/gate")).To(BeTrue())
})
+ It("podman search image with description", func() {
+ search := podmanTest.Podman([]string{"search", "quay.io/libpod/whalesay"})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Equal(0))
+ output := string(search.Out.Contents())
+ match, _ := regexp.MatchString(`(?m)^quay.io\s+quay.io/libpod/whalesay\s+Static image used for automated testing.+$`, output)
+ Expect(match).To(BeTrue())
+ })
+
It("podman search format flag", func() {
search := podmanTest.Podman([]string{"search", "--format", "table {{.Index}} {{.Name}}", "alpine"})
search.WaitWithDefaultTimeout()
diff --git a/test/e2e/sign/secret-key.asc b/test/e2e/sign/secret-key.asc
new file mode 100644
index 000000000..23c0d05c3
--- /dev/null
+++ b/test/e2e/sign/secret-key.asc
@@ -0,0 +1,57 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQOYBF8kNqwBCAC0x3Kog+WlDNwcR6rWIP8Gj2T6LrQ2/3knSyAWzTgC/OBB6Oh0
+KAokXLjy8J3diG3EaSltE7erGG/bZCz8jYvMiwDJScON4zzidotqjoY80E+NeRDg
+CC0gqvqmh0ftJIjYNBHzSxqrGRQwzwZU+u6ezlE8+0dvsHcHY+MRnxXJQrdM07EP
+Prp85kKckChDlJ1tyGUB/YHieFQmOW5+TERA7ZqQOAQ12Vviv6V4kNfEJJq3MS2c
+csZpO323tcHt3oebqsZCIElhX7uVw6GAeCw1tm4NZXs4g1yIC21Of/hzPeC18F72
+splCgKaAOiE9w/nMGLNEYy2NzgEclZLs2Y7jABEBAAEAB/9VOcwHvvrWN3xThsP2
+5CJmxNZtjfQfE4zZ5fRwW3pjCjVtTTC9hhzV7LKysZYzGPzqwksp5chKjKA7VXxR
+6ic0nHmX68MaEr2i5BExAJUveWNvxloazC/+PS0ishdKKNWs28t0n/0oGZAnvIn3
+KT+ytYCeF7ajZJWQ8dncdlvuf86I8GdmqP2Og9A67LUpJfH2rtpBjzH25nLSZ3Qz
+QbHoUIv318Wwb1sIidkmPsZufZG3pMsYjtFtJjkWt0lRsJQnSE9AQOpQTkLsVsh2
+FYQZ2ODhH8+NE86UNAAr2JiMZHoTrEUL2SLwpXEthFIR78N009dOS4nw8CLB61BL
+pr6lBADH6yoF95rI0LR3jphfr7e8K3BViTPK97wke6EqwTlVo0TCdR785sa8T8O8
+HvlYx4a+h3e6D4D0vjDPzxtevjdTjYmxqI3cwE2N3NFALgGBwHs01qRRIw4qxZlC
+1L1byJ8rUVi5z3YMO7X4RcXSAtg3fM2fx2x+sdpyH30jafuKPwQA533PVRo/Rlgt
+pOak9Fs+3KOIb+oO8ypy7RRQcnsTKajts0sUE9scBhT4tSDeSa/HaDvzLiE8KKCK
+3rhn8ZKLTW1fvKNZBj6oGlIRyfOFjtN/jMRjo0WVHSUDJ59Zr8C0khpP5J73yhTr
+fDhcuTPWiCjlDYeSFHV/a4Z45GG2Kl0EAL1I31kxnSQR9bN5ZmvV+aOhTRKOuHDm
+6nISF/XnVwuGHCvMbFRKsTxGkGrPO5VQZflFOqVab9umIQkOIcrzeKj+slYlm5VA
+zKfCQ1vZ2f74QYCNP8oeRa1r3D46fszcElZJQxtZZewYRKX63bvU4F+hql8dJTqe
+e3wVq8QD657yRwC0FGZvb2JhciA8Zm9vQGJhci5jb20+iQFUBBMBCAA+FiEERyT4
+ac7LLibByeabqaoHAy6P2bIFAl8kNqwCGwMFCQPCZwAFCwkIBwIGFQoJCAsCBBYC
+AwECHgECF4AACgkQqaoHAy6P2bKtuggAgv54/F8wgi+uMrtFr8rqNtZMDyXRxfXa
+XUy5uGNfqHD83yqxweEqxiA8lmFkRHixPWtgZ2MniFXMVc9kVmg8GNIIuzewXrPq
+tXztvuURQo9phK68v8fXEqqT6K25wtq8TiQZ0J3mQIJPPTMe3pCCOyR6+W3iMtQp
+2AmitxKbzLP3J3GG2i0rG5S147A2rPnzTeMYhds819+JE7jNMD7FkV+TcQlOVl4w
+yOQhNEJcjb6rA6EUe5+s85pIFTBSyPMJpJ03Y0dLdcSGpKdncGTK2X9+hS96G1+F
+P/t8hRIDblqUHtBRXe3Ozz6zSqpqu1DbAQSMbIrLYxXfnZEN+ro0dJ0DmARfJDas
+AQgAncvLLZUHZkJWDPka3ocysJ7+/lmrXyAjT3D4r7UM4oaLBOMKjvaKSDw1uW5q
+YmTxnnsqFDI0O5+XJxD1/0qEf6l2oUpnILdxVruf28FuvymbsyhDgs+MBoHz0jLW
+WPHUW2oWLIqcvaF0BePQ1GS6UoZlmZejsLwwcSpbaAHJng7An/iLuqOBr5EdUA5X
+MXqmdMFDrjh0uZezImJ2Eacu/hshBdu3IY49J5XP18GWrSdUnP27cv3tOii9j5Lf
+l8QAvCN89vkALIU3eZtnMlWZqLgl5o6COVFmzpyx+iHOoCznQBt0aGoSNmE/dAqW
+IQS/xCSFqMHI6kNd9N0oR0rEHwARAQABAAf+L3mAhBLJ2qDLrfSGenv3qr7zXggR
+cLnFFeIZ2BdjLIYpLku2wgN34DrJOSR4umi/bxyEMPZX07Z0rgrC0E+VpKkSKX2u
+oF/AqEUj1+SPEtGMaC8NfL4/1Tdk6ZFk/vanGufEixsbBEyekSUVD8nMawbHa5n9
+ZC+CbZG+VYDwLW6u0Pb26CIhqpFNQL3E88uLeVNhnE+nNJfgB2Nyo8gUQszovUxk
+hv64UlXYA3wt49mpc9ORs9qKMZkuKdJfYmJkmvqLE35YpRRz6i1+hg71doj7Sjel
+naBV3qIrIbcN6I2/9ZUwVwCzttpeHDfKOxQk5szWFop6H79TZGRsrV6boQQAwnFO
+v5pjsZLhqHIZPty3zXz7Tv3LmaatwA260n6NcLBuQFiuEoh2QsMfVtLU8bBzyuC8
+Znx3kPlGCCognSjkEis+aEjsZgvCzR3aP+FWejkhnZnFiSJDvgEftODLF3gSPVp3
+dhc6q5GLysc0iN/gkBZN8Qm1lL/kEyeri4mbWT8EAM/AczrXL0tPEV3YzyeyM972
+HP9OnIYoyIkCa4M0PA0qhUPJ+vBHl/1+p5WZD/eokXqJ2M8IqNSlinuou3azbg+r
+N3xTaB0a+Vx6O/RRI73+4UK2fyN9gYRH437eliNBRTkZeZCQ6Dd5eYcABaL2DbSs
+1dyGXzRWfzdvGVu/r/0hBACER5u/uac+y9sXr79imoLVya25XkjvswGrDxmrlmNg
+cfn/bix9Z93TXScYPiyxzLwlDDd7ovlpv1+mbHNgj6krSGG+R+uLQ2+nm+9glMmz
+KupEYF59lzOgEYScJaHQWBULPRGUy/7HmZGpsDmz8zpj8lHaFNLlqDzrxw3MNKxO
+F0NFiQE8BBgBCAAmFiEERyT4ac7LLibByeabqaoHAy6P2bIFAl8kNqwCGwwFCQPC
+ZwAACgkQqaoHAy6P2bJfjQgAje6YR+p1QaNlTN9l4t2kGzy9RhkfYMrTgI2fEqbS
+9bFJUy3Y3mH+vj/r2gN/kaN8LHH4K1d7fAohBsFqSI0flzHHIx2rfti9zAlbXcAE
+rbnG+f0fk0AaqU7KelU35vjPfNe6Vn7ky6G9CC6jW04NkLZDNFA2GusdYf1aM0LW
+ew5t4WZaquLVFhL36q9eHaogO/fcPR/quvQefHokk+b541ytwMN9l/g43rTbCvAj
+rUDHwipbGbw91Wg2XjbecRiCXDKWds2M149BpxUzY5xHFtD5t5WSEE/SkkryGTMm
+TxS3tuQZ9PdtCPGrNDO6Ts/amORF04Tf+YMJgfv3IWxMeQ==
+=6kcB
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go
index 78410c9cf..aef5ca001 100644
--- a/test/e2e/start_test.go
+++ b/test/e2e/start_test.go
@@ -86,6 +86,18 @@ var _ = Describe("Podman start", func() {
Expect(session.OutputToString()).To(Equal(name))
})
+ It("podman start single container with attach and test the signal", func() {
+ SkipIfRemote()
+ session := podmanTest.Podman([]string{"create", "--entrypoint", "sh", ALPINE, "-c", "exit 1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+ session = podmanTest.Podman([]string{"start", "--attach", cid})
+ session.WaitWithDefaultTimeout()
+ // start --attach should forward the container's exit code
+ Expect(session.ExitCode()).To(Equal(1))
+ })
+
It("podman start multiple containers", func() {
session := podmanTest.Podman([]string{"create", "-d", "--name", "foobar99", ALPINE, "ls"})
session.WaitWithDefaultTimeout()
diff --git a/test/endpoint/endpoint.go b/test/endpoint/endpoint.go
index 0593b05cd..d2c143824 100644
--- a/test/endpoint/endpoint.go
+++ b/test/endpoint/endpoint.go
@@ -192,12 +192,12 @@ func (p *EndpointTestIntegration) Varlink(endpoint, message string, more bool) *
}
func (s *EndpointSession) StdErrToString() string {
- fields := strings.Fields(fmt.Sprintf("%s", s.Err.Contents()))
+ fields := strings.Fields(string(s.Err.Contents()))
return strings.Join(fields, " ")
}
func (s *EndpointSession) OutputToString() string {
- fields := strings.Fields(fmt.Sprintf("%s", s.Out.Contents()))
+ fields := strings.Fields(string(s.Out.Contents()))
return strings.Join(fields, " ")
}
diff --git a/test/system/001-basic.bats b/test/system/001-basic.bats
index 71595f419..b23107e79 100644
--- a/test/system/001-basic.bats
+++ b/test/system/001-basic.bats
@@ -31,6 +31,37 @@ function setup() {
run_podman pull $IMAGE
}
+# PR #7212: allow --remote anywhere before subcommand, not just as 1st flag
+@test "podman-remote : really is remote, works as --remote option" {
+ if ! is_remote; then
+ skip "only applicable on podman-remote"
+ fi
+
+ # First things first: make sure our podman-remote actually is remote!
+ run_podman version
+ is "$output" ".*Server:" "the given podman path really contacts a server"
+
+ # $PODMAN may be a space-separated string, e.g. if we include a --url.
+ # Split it into its components; remove "-remote" from the command path;
+ # and preserve any other args if present.
+ local -a podman_as_array=($PODMAN)
+ local podman_path=${podman_as_array[0]}
+ local podman_non_remote=${podman_path%%-remote}
+ local -a podman_args=("${podman_as_array[@]:1}")
+
+ # This always worked: running "podman --remote ..."
+ PODMAN="${podman_non_remote} --remote ${podman_args[@]}" run_podman version
+ is "$output" ".*Server:" "podman --remote: contacts server"
+
+ # This was failing: "podman --foo --bar --remote".
+ PODMAN="${podman_non_remote} --tmpdir /var/tmp --log-level=error ${podman_args[@]} --remote" run_podman version
+ is "$output" ".*Server:" "podman [flags] --remote: contacts server"
+
+ # ...but no matter what, --remote is never allowed after subcommand
+ PODMAN="${podman_non_remote} ${podman_args[@]}" run_podman 125 version --remote
+ is "$output" "Error: unknown flag: --remote" "podman version --remote"
+}
+
# This is for development only; it's intended to make sure our timeout
# in run_podman continues to work. This test should never run in production
# because it will, by definition, fail.
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index b30c1103b..41863ba04 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -63,7 +63,6 @@ echo $rand | 0 | $rand
# 'run --preserve-fds' passes a number of additional file descriptors into the container
@test "podman run --preserve-fds" {
- skip "enable this once #6653 is fixed"
skip_if_remote
content=$(random_string 20)
@@ -202,6 +201,8 @@ echo $rand | 0 | $rand
}
@test "podman run docker-archive" {
+ skip_if_remote "FIXME: pending #7116"
+
# Create an image that, when run, outputs a random magic string
expect=$(random_string 20)
run_podman run --name myc --entrypoint="[\"/bin/echo\",\"$expect\"]" $IMAGE
@@ -247,6 +248,8 @@ echo $rand | 0 | $rand
# symptom only manifests on a fedora container image -- we have no
# reproducer on alpine. Checking directory ownership is good enough.
@test "podman run : user namespace preserved root ownership" {
+ skip_if_remote "FIXME: pending #7195"
+
for priv in "" "--privileged"; do
for user in "--user=0" "--user=100"; do
for keepid in "" "--userns=keep-id"; do
@@ -264,6 +267,8 @@ echo $rand | 0 | $rand
# #6829 : add username to /etc/passwd inside container if --userns=keep-id
@test "podman run : add username to /etc/passwd if --userns=keep-id" {
+ skip_if_remote "FIXME: pending #7195"
+
# Default: always run as root
run_podman run --rm $IMAGE id -un
is "$output" "root" "id -un on regular container"
@@ -286,6 +291,8 @@ echo $rand | 0 | $rand
# #6991 : /etc/passwd is modifiable
@test "podman run : --userns=keep-id: passwd file is modifiable" {
+ skip_if_remote "FIXME: pending #7195"
+
run_podman run -d --userns=keep-id $IMAGE sh -c 'while ! test -e /stop; do sleep 0.1; done'
cid="$output"
diff --git a/test/system/035-logs.bats b/test/system/035-logs.bats
index 055865c8d..cbb2091e5 100644
--- a/test/system/035-logs.bats
+++ b/test/system/035-logs.bats
@@ -25,6 +25,8 @@ load helpers
}
@test "podman logs - multi" {
+ skip_if_remote "logs does not support multiple containers when run remotely"
+
# Simple helper to make the container starts, below, easier to read
local -a cid
doit() {
diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats
index 093606ece..f604ea2e2 100644
--- a/test/system/050-stop.bats
+++ b/test/system/050-stop.bats
@@ -12,9 +12,12 @@ load helpers
run_podman stop $cid
t1=$SECONDS
- # Confirm that container is stopped
+ # Confirm that container is stopped. Podman-remote unfortunately
+ # cannot tell the difference between "stopped" and "exited", and
+ # spits them out interchangeably, so we need to recognize either.
run_podman inspect --format '{{.State.Status}} {{.State.ExitCode}}' $cid
- is "$output" "exited \+137" "Status and exit code of stopped container"
+ is "$output" "\\(stopped\|exited\\) \+137" \
+ "Status and exit code of stopped container"
# The initial SIGTERM is ignored, so this operation should take
# exactly 10 seconds. Give it some leeway.
diff --git a/test/system/055-rm.bats b/test/system/055-rm.bats
index c8475c3e9..478ba0f20 100644
--- a/test/system/055-rm.bats
+++ b/test/system/055-rm.bats
@@ -44,6 +44,8 @@ load helpers
#
# See https://github.com/containers/podman/issues/3795
@test "podman rm -f" {
+ skip_if_remote "FIXME: pending #7117"
+
rand=$(random_string 30)
( sleep 3; run_podman rm -f $rand ) &
run_podman 137 run --name $rand $IMAGE sleep 30
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index a69d32a2f..0e6e97d40 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -6,9 +6,7 @@
load helpers
@test "podman build - basic test" {
- if is_remote && is_rootless; then
- skip "unreliable with podman-remote and rootless; #2972"
- fi
+ skip_if_remote "FIXME: pending #7136"
rand_filename=$(random_string 20)
rand_content=$(random_string 50)
@@ -34,6 +32,7 @@ EOF
# Regression from v1.5.0. This test passes fine in v1.5.0, fails in 1.6
@test "podman build - cache (#3920)" {
+ skip_if_remote "FIXME: pending #7136"
if is_remote && is_rootless; then
skip "unreliable with podman-remote and rootless; #2972"
fi
@@ -81,6 +80,8 @@ EOF
}
@test "podman build - URLs" {
+ skip_if_remote "FIXME: pending #7137"
+
tmpdir=$PODMAN_TMPDIR/build-test
mkdir -p $tmpdir
@@ -100,6 +101,8 @@ EOF
@test "podman build - workdir, cmd, env, label" {
+ skip_if_remote "FIXME: pending #7137"
+
tmpdir=$PODMAN_TMPDIR/build-test
mkdir -p $tmpdir
@@ -162,6 +165,7 @@ EOF
# cd to the dir, so we test relative paths (important for podman-remote)
cd $PODMAN_TMPDIR
run_podman build -t build_test -f build-test/Containerfile build-test
+ local iid="${lines[-1]}"
# Run without args - should run the above script. Verify its output.
export MYENV2="$s_env2"
@@ -229,24 +233,40 @@ Labels.$label_name | $label_value
run_podman run --rm build_test stat -c'%u:%g:%N' /a/b/c/myfile
is "$output" "4:5:/a/b/c/myfile" "file in volume is chowned"
+ # Hey, as long as we have an image with lots of layers, let's
+ # confirm that 'image tree' works as expected
+ run_podman image tree build_test
+ is "${lines[0]}" "Image ID: ${iid:0:12}" \
+ "image tree: first line"
+ is "${lines[1]}" "Tags: \[localhost/build_test:latest]" \
+ "image tree: second line"
+ is "${lines[2]}" "Size: [0-9.]\+[kM]B" \
+ "image tree: third line"
+ is "${lines[3]}" "Image Layers" \
+ "image tree: fourth line"
+ is "${lines[4]}" "... ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[$IMAGE]" \
+ "image tree: first layer line"
+ is "${lines[-1]}" "... ID: [0-9a-f]\{12\} Size: .* Top Layer of: \[localhost/build_test:latest]" \
+ "image tree: last layer line"
+
# Clean up
run_podman rmi -f build_test
}
@test "podman build - stdin test" {
- if is_remote && is_rootless; then
- skip "unreliable with podman-remote and rootless; #2972"
- fi
+ skip_if_remote "FIXME: pending #7136"
- # Random workdir, and multiple random strings to verify command & env
+ # Random workdir, and random string to verify build output
workdir=/$(random_string 10)
+ random_echo=$(random_string 15)
PODMAN_TIMEOUT=240 run_podman build -t build_test - << EOF
FROM $IMAGE
RUN mkdir $workdir
WORKDIR $workdir
-RUN /bin/echo 'Test'
+RUN /bin/echo $random_echo
EOF
is "$output" ".*STEP 5: COMMIT" "COMMIT seen in log"
+ is "$output" ".*STEP .: RUN /bin/echo $random_echo"
run_podman run --rm build_test pwd
is "$output" "$workdir" "pwd command in container"
diff --git a/test/system/075-exec.bats b/test/system/075-exec.bats
index b2c49510a..38c6c2312 100644
--- a/test/system/075-exec.bats
+++ b/test/system/075-exec.bats
@@ -6,6 +6,8 @@
load helpers
@test "podman exec - basic test" {
+ skip_if_remote "FIXME: pending #7241"
+
rand_filename=$(random_string 20)
rand_content=$(random_string 50)
@@ -19,6 +21,15 @@ load helpers
run_podman exec $cid sh -c "cat /$rand_filename"
is "$output" "$rand_content" "Can exec and see file in running container"
+
+ # Specially defined situations: exec a dir, or no such command.
+ # We don't check the full error message because runc & crun differ.
+ run_podman 126 exec $cid /etc
+ is "$output" ".*permission denied" "podman exec /etc"
+ run_podman 127 exec $cid /no/such/command
+ is "$output" ".*such file or dir" "podman exec /no/such/command"
+
+ # Done
run_podman exec $cid rm -f /$rand_filename
run_podman wait $cid
diff --git a/test/system/110-history.bats b/test/system/110-history.bats
index 5dc221d61..b83e90fe4 100644
--- a/test/system/110-history.bats
+++ b/test/system/110-history.bats
@@ -3,6 +3,8 @@
load helpers
@test "podman history - basic tests" {
+ skip_if_remote "FIXME: pending #7122"
+
tests="
| .*[0-9a-f]\\\{12\\\} .* CMD .* LABEL
--format '{{.ID}} {{.Created}}' | .*[0-9a-f]\\\{12\\\} .* ago
diff --git a/test/system/120-load.bats b/test/system/120-load.bats
index afa5ab473..4825eed07 100644
--- a/test/system/120-load.bats
+++ b/test/system/120-load.bats
@@ -28,6 +28,8 @@ verify_iid_and_name() {
@test "podman load - by image ID" {
+ skip_if_remote "FIXME: pending #7123"
+
# FIXME: how to build a simple archive instead?
get_iid_and_name
@@ -74,7 +76,7 @@ verify_iid_and_name() {
verify_iid_and_name $img_name
}
-@test "podman load - NAME and NAME:TAG arguments work (requires: #2674)" {
+@test "podman load - NAME and NAME:TAG arguments work" {
get_iid_and_name
run_podman save $iid -o $archive
run_podman rmi $iid
diff --git a/test/system/130-kill.bats b/test/system/130-kill.bats
index c16e64c58..05090f852 100644
--- a/test/system/130-kill.bats
+++ b/test/system/130-kill.bats
@@ -6,6 +6,8 @@
load helpers
@test "podman kill - test signal handling in containers" {
+ skip_if_remote "FIXME: pending #7135"
+
# podman-remote and crun interact poorly in f31: crun seems to gobble up
# some signals.
# Workaround: run 'env --default-signal sh' instead of just 'sh' in
diff --git a/test/system/140-diff.bats b/test/system/140-diff.bats
index 9f4a2c0de..01ec5430e 100644
--- a/test/system/140-diff.bats
+++ b/test/system/140-diff.bats
@@ -6,9 +6,16 @@
load helpers
@test "podman diff" {
+ n=$(random_string 10) # container name
rand_file=$(random_string 10)
- run_podman run $IMAGE sh -c "touch /$rand_file;rm /etc/services"
- run_podman diff --format json -l
+ run_podman run --name $n $IMAGE sh -c "touch /$rand_file;rm /etc/services"
+
+ # If running local, test `-l` (latest) option. This can't work with remote.
+ if ! is_remote; then
+ n=-l
+ fi
+
+ run_podman diff --format json $n
# Expected results for each type of diff
declare -A expect=(
@@ -22,7 +29,7 @@ load helpers
is "$result" "${expect[$field]}" "$field"
done
- run_podman rm -l
+ run_podman rm $n
}
# vim: filetype=sh
diff --git a/test/system/160-volumes.bats b/test/system/160-volumes.bats
index 3233e6f04..3f50bd3c4 100644
--- a/test/system/160-volumes.bats
+++ b/test/system/160-volumes.bats
@@ -140,7 +140,6 @@ EOF
# Anonymous temporary volumes, and persistent autocreated named ones
@test "podman volume, implicit creation with run" {
-
# No hostdir arg: create anonymous container with random name
rand=$(random_string)
run_podman run -v /myvol $IMAGE sh -c "echo $rand >/myvol/myfile"
@@ -187,6 +186,7 @@ EOF
# Confirm that container sees the correct id
@test "podman volume with --userns=keep-id" {
is_rootless || skip "only meaningful when run rootless"
+ skip_if_remote "FIXME: pending #7195"
myvoldir=${PODMAN_TMPDIR}/volume_$(random_string)
mkdir $myvoldir
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index 0ad555305..f3ec8a67c 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -18,7 +18,9 @@ function teardown() {
@test "podman pod top - containers in different PID namespaces" {
- skip_if_remote "podman-pod does not work with podman-remote"
+ if is_remote && is_rootless; then
+ skip "FIXME: pending #7139"
+ fi
# With infra=false, we don't get a /pause container (we also
# don't pull k8s.gcr.io/pause )
@@ -53,7 +55,9 @@ function teardown() {
@test "podman pod - communicating between pods" {
- skip_if_remote "podman-pod does not work with podman-remote"
+ if is_remote && is_rootless; then
+ skip "FIXME: pending #7139"
+ fi
podname=pod$(random_string)
run_podman 1 pod exists $podname
@@ -77,7 +81,7 @@ function teardown() {
run_podman ps --format '{{.Pod}}'
newline="
"
- is "$output" "${podid:0:12}${newline}${podid:0:12}" "sdfdsf"
+ is "$output" "${podid:0:12}${newline}${podid:0:12}" "ps shows 2 pod IDs"
# Talker: send the message via common port on localhost
message=$(random_string 15)
@@ -135,6 +139,10 @@ function random_ip() {
}
@test "podman pod create - hashtag AllTheOptions" {
+ if is_remote && is_rootless; then
+ skip "FIXME: pending #7139"
+ fi
+
mac=$(random_mac)
add_host_ip=$(random_ip)
add_host_n=$(random_string | tr A-Z a-z).$(random_string | tr A-Z a-z).xyz
diff --git a/test/system/220-healthcheck.bats b/test/system/220-healthcheck.bats
index e649ad3d2..3405029c1 100644
--- a/test/system/220-healthcheck.bats
+++ b/test/system/220-healthcheck.bats
@@ -25,6 +25,7 @@ function _check_health {
@test "podman healthcheck" {
+ skip_if_remote "FIXME: pending #7137"
# Create an image with a healthcheck script; said script will
# pass until the file /uh-oh gets created (by us, via exec)
diff --git a/test/system/400-unprivileged-access.bats b/test/system/400-unprivileged-access.bats
index 1384c0ab8..1b2d14554 100644
--- a/test/system/400-unprivileged-access.bats
+++ b/test/system/400-unprivileged-access.bats
@@ -101,6 +101,11 @@ EOF
# #6957 - mask out /proc/acpi, /sys/dev, and other sensitive system files
@test "sensitive mount points are masked without --privileged" {
+ # Weird error, maybe a flake?
+ # can only attach to created or running containers: container state improper
+ # https://github.com/containers/podman/pull/7111#issuecomment-666858715
+ skip_if_remote "FIXME: Weird flake"
+
# FIXME: this should match the list in pkg/specgen/generate/config_linux.go
local -a mps=(
/proc/acpi
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index abca91739..a6414344e 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -240,12 +240,29 @@ function is_remote() {
[[ "$PODMAN" =~ -remote ]]
}
+###########################
+# _add_label_if_missing # make sure skip messages include rootless/remote
+###########################
+function _add_label_if_missing() {
+ local msg="$1"
+ local want="$2"
+
+ if [ -z "$msg" ]; then
+ echo
+ elif expr "$msg" : ".*$want" &>/dev/null; then
+ echo "$msg"
+ else
+ echo "[$want] $msg"
+ fi
+}
+
######################
# skip_if_rootless # ...with an optional message
######################
function skip_if_rootless() {
if is_rootless; then
- skip "${1:-not applicable under rootless podman}"
+ local msg=$(_add_label_if_missing "$1" "rootless")
+ skip "${msg:-not applicable under rootless podman}"
fi
}
@@ -254,7 +271,8 @@ function skip_if_rootless() {
####################
function skip_if_remote() {
if is_remote; then
- skip "${1:-test does not work with podman-remote}"
+ local msg=$(_add_label_if_missing "$1" "remote")
+ skip "${msg:-test does not work with podman-remote}"
fi
}
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 0597cd292..a45ce7b36 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -207,6 +207,10 @@ func WaitContainerReady(p PodmanTestCommon, id string, expStr string, timeout in
// OutputToString formats session output to string
func (s *PodmanSession) OutputToString() string {
+ if s == nil || s.Out == nil || s.Out.Contents() == nil {
+ return ""
+ }
+
fields := strings.Fields(string(s.Out.Contents()))
return strings.Join(fields, " ")
}
@@ -215,7 +219,7 @@ func (s *PodmanSession) OutputToString() string {
// where each array item is a line split by newline
func (s *PodmanSession) OutputToStringArray() []string {
var results []string
- output := fmt.Sprintf("%s", s.Out.Contents())
+ output := string(s.Out.Contents())
for _, line := range strings.Split(output, "\n") {
if line != "" {
results = append(results, line)
@@ -226,14 +230,14 @@ func (s *PodmanSession) OutputToStringArray() []string {
// ErrorToString formats session stderr to string
func (s *PodmanSession) ErrorToString() string {
- fields := strings.Fields(fmt.Sprintf("%s", s.Err.Contents()))
+ fields := strings.Fields(string(s.Err.Contents()))
return strings.Join(fields, " ")
}
// ErrorToStringArray returns the stderr output as a []string
// where each array item is a line split by newline
func (s *PodmanSession) ErrorToStringArray() []string {
- output := fmt.Sprintf("%s", s.Err.Contents())
+ output := string(s.Err.Contents())
return strings.Split(output, "\n")
}
@@ -341,6 +345,16 @@ func SystemExec(command string, args []string) *PodmanSession {
return &PodmanSession{session}
}
+// StartSystemExec starts a system command without waiting for it to complete
+func StartSystemExec(command string, args []string) *PodmanSession {
+ c := exec.Command(command, args...)
+ session, err := gexec.Start(c, GinkgoWriter, GinkgoWriter)
+ if err != nil {
+ Fail(fmt.Sprintf("unable to run command: %s %s", command, strings.Join(args, " ")))
+ }
+ return &PodmanSession{session}
+}
+
// StringInSlice determines if a string is in a string slice, returns bool
func StringInSlice(s string, sl []string) bool {
for _, i := range sl {
diff --git a/utils/utils.go b/utils/utils.go
index 27ce1821d..a6ef663d7 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
+ "github.com/containers/podman/v2/libpod/define"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -51,7 +52,7 @@ func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []stri
// ErrDetach is an error indicating that the user manually detached from the
// container.
-var ErrDetach = errors.New("detached from container")
+var ErrDetach = define.ErrDetach
// CopyDetachable is similar to io.Copy but support a detach key sequence to break out.
func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 3b4f6e9de..a47a48453 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -27,7 +27,7 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
- # See https://github.com/containers/libpod/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task
+ # See https://github.com/containers/podman/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task
FEDORA_NAME: "fedora-32"
PRIOR_FEDORA_NAME: "fedora-31"
UBUNTU_NAME: "ubuntu-20"
@@ -111,17 +111,32 @@ gce_instance:
# not supported by bors-ng
# allow_failures: $CI == $CI
- timeout_in: 30m
+ timeout_in: 45m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
- # FIXME: These tests mostly/always fail
- unit_test_script: '${SCRIPT_BASE}/test.sh unit |& ${_TIMESTAMP} || true'
+ unit_test_script: '${SCRIPT_BASE}/test.sh unit |& ${_TIMESTAMP}'
binary_artifacts:
path: ./bin/*
+'cirrus-ci/only_prs/conformance_task':
+ gce_instance: # Only need to specify differences from defaults (above)
+ image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+
+ # see bors.toml
+ skip: $CIRRUS_BRANCH =~ ".*\.tmp"
+
+ # don't fail the PR on a conformance failure until #2480 is merged
+ allow_failures: true
+
+ timeout_in: 20m
+
+ setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
+ conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}'
+
+
# This task runs `make vendor` followed by ./hack/tree_status.sh to check
# whether the git tree is clean. The reasoning for that is to make sure
# that the vendor.conf, the code and the vendored packages in ./vendor are
@@ -253,3 +268,38 @@ gce_instance:
memory: 1
script: /bin/true
+
+# Build the static binary
+'cirrus-ci/only_prs/static_binary_task':
+ depends_on:
+ - "cirrus-ci/only_prs/gate"
+
+ gce_instance:
+ image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+ cpu: 8
+ memory: 12
+ disk: 200
+
+ init_script: |
+ set -ex
+ setenforce 0
+ growpart /dev/sda 1 || true
+ resize2fs /dev/sda1 || true
+ yum -y install podman
+
+ nix_cache:
+ folder: '.cache'
+ fingerprint_script: |
+ echo "nix-v1-$(sha1sum nix/nixpkgs.json | head -c 40)"
+
+ build_script: |
+ set -ex
+ mkdir -p /nix
+ mkdir -p .cache
+ mount --bind .cache /nix
+ if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
+ podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ chown -Rf $(whoami) .cache
+
+ binaries_artifacts:
+ path: "result/bin/buildah"
diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore
index 947216443..d98205316 100644
--- a/vendor/github.com/containers/buildah/.gitignore
+++ b/vendor/github.com/containers/buildah/.gitignore
@@ -6,5 +6,6 @@ docs/buildah*.1
tests/tools/build
Dockerfile*
!/tests/bud/*/Dockerfile*
+!/tests/conformance/**/Dockerfile*
*.swp
result
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 892c25810..5b0e00850 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -35,16 +35,25 @@ LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
-SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go util/*.go
+SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go copier/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go util/*.go
LINTFLAGS ?=
all: bin/buildah bin/imgtype docs
+# Update nix/nixpkgs.json to its latest stable commit
+.PHONY: nixpkgs
nixpkgs:
- @nix run -f channel:nixpkgs-unstable nix-prefetch-git -c nix-prefetch-git \
+ @nix run -f channel:nixos-20.03 nix-prefetch-git -c nix-prefetch-git \
--no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
+# Build statically linked binary
+.PHONY: static
+static:
+ @nix build -f nix/
+ mkdir -p ./bin
+ cp -rfp ./result/bin/* ./bin/
+
.PHONY: bin/buildah
bin/buildah: $(SOURCES)
$(GO_BUILD) $(LDFLAGS) -o $@ $(BUILDFLAGS) ./cmd/buildah
@@ -125,20 +134,24 @@ install.completions:
install.runc:
install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
+.PHONY: test-conformance
+test-conformance:
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 15m ./tests/conformance
+
.PHONY: test-integration
test-integration: install.tools
./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/.
cd tests; ./test_runner.sh
tests/testreport/testreport: tests/testreport/testreport.go
- $(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport
+ $(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
.PHONY: test-unit
test-unit: tests/testreport/testreport
- $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd)
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 40m
tmp=$(shell mktemp -d) ; \
mkdir -p $$tmp/root $$tmp/runroot; \
- $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json -registries-conf $(shell pwd)/tests/registries.conf
+ $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 7589da51d..1605aaf73 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -41,7 +41,7 @@ Buildah and Podman are two complementary open-source projects that are
available on most Linux platforms and both projects reside at
[GitHub.com](https://github.com) with Buildah
[here](https://github.com/containers/buildah) and Podman
-[here](https://github.com/containers/libpod). Both, Buildah and Podman are
+[here](https://github.com/containers/podman). Both, Buildah and Podman are
command line tools that work on Open Container Initiative (OCI) images and
containers. The two projects differentiate in their specialization.
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index d65c36470..8616c4cac 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -741,10 +742,13 @@ func runUsingChrootExecMain() {
os.Exit(1)
}
} else {
- logrus.Debugf("clearing supplemental groups")
- if err = syscall.Setgroups([]int{}); err != nil {
- fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err)
- os.Exit(1)
+ setgroups, _ := ioutil.ReadFile("/proc/self/setgroups")
+ if strings.Trim(string(setgroups), "\n") != "deny" {
+ logrus.Debugf("clearing supplemental groups")
+ if err = syscall.Setgroups([]int{}); err != nil {
+ fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err)
+ os.Exit(1)
+ }
}
}
@@ -1093,7 +1097,8 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
subSys := filepath.Join(spec.Root.Path, m.Mountpoint)
if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
- return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
+ logrus.Warningf("could not bind mount %q, skipping: %v", m.Mountpoint, err)
+ continue
}
if err := makeReadOnly(subSys, sysFlags); err != nil {
return undoBinds, err
@@ -1101,10 +1106,6 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys"))
- // Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes
- // attempting to interact with labeling, when they aren't allowed to do so.
- spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
-
// Bind mount in everything we've been asked to mount.
for _, m := range spec.Mounts {
// Skip anything that we just mounted.
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index 498bdeeb5..ff1bef2f5 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -1,63 +1,255 @@
package buildah
import (
+ "archive/tar"
+ "fmt"
"hash"
- "strings"
+ "io"
+ "sync"
digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
)
-type singleDigester struct {
- digester digest.Digester
- prefix string
+type digester interface {
+ io.WriteCloser
+ ContentType() string
+ Digest() digest.Digest
+}
+
+// A simple digester just digests its content as-is.
+type simpleDigester struct {
+ digester digest.Digester
+ hasher hash.Hash
+ contentType string
+}
+
+func newSimpleDigester(contentType string) digester {
+ finalDigester := digest.Canonical.Digester()
+ return &simpleDigester{
+ digester: finalDigester,
+ hasher: finalDigester.Hash(),
+ contentType: contentType,
+ }
+}
+
+func (s *simpleDigester) ContentType() string {
+ return s.contentType
+}
+
+func (s *simpleDigester) Write(p []byte) (int, error) {
+ return s.hasher.Write(p)
+}
+
+func (s *simpleDigester) Close() error {
+ return nil
+}
+
+func (s *simpleDigester) Digest() digest.Digest {
+ return s.digester.Digest()
+}
+
+// A tarFilterer passes a tarball through to an io.WriteCloser, potentially
+// modifying headers as it goes.
+type tarFilterer struct {
+ wg sync.WaitGroup
+ pipeWriter *io.PipeWriter
+ closedLock sync.Mutex
+ closed bool
+ err error
+}
+
+func (t *tarFilterer) Write(p []byte) (int, error) {
+ return t.pipeWriter.Write(p)
+}
+
+func (t *tarFilterer) Close() error {
+ t.closedLock.Lock()
+ if t.closed {
+ t.closedLock.Unlock()
+ return errors.Errorf("tar filter is already closed")
+ }
+ t.closed = true
+ t.closedLock.Unlock()
+ err := t.pipeWriter.Close()
+ t.wg.Wait()
+ if err != nil {
+ return errors.Wrapf(err, "error closing filter pipe")
+ }
+ return t.err
+}
+
+// newTarFilterer passes one or more tar archives through to an io.WriteCloser
+// as a single archive, potentially calling filter to modify headers and
+// contents as it goes.
+func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
+ pipeReader, pipeWriter := io.Pipe()
+ tarWriter := tar.NewWriter(writeCloser)
+ filterer := &tarFilterer{
+ pipeWriter: pipeWriter,
+ }
+ filterer.wg.Add(1)
+ go func() {
+ filterer.closedLock.Lock()
+ closed := filterer.closed
+ filterer.closedLock.Unlock()
+ for !closed {
+ tarReader := tar.NewReader(pipeReader)
+ hdr, err := tarReader.Next()
+ for err == nil {
+ var skip, replaceContents bool
+ var replacementContents io.Reader
+ if filter != nil {
+ skip, replaceContents, replacementContents = filter(hdr)
+ }
+ if !skip {
+ err = tarWriter.WriteHeader(hdr)
+ if err != nil {
+ err = errors.Wrapf(err, "error filtering tar header for %q", hdr.Name)
+ break
+ }
+ if hdr.Size != 0 {
+ var n int64
+ var copyErr error
+ if replaceContents {
+ n, copyErr = io.CopyN(tarWriter, replacementContents, hdr.Size)
+ } else {
+ n, copyErr = io.Copy(tarWriter, tarReader)
+ }
+ if copyErr != nil {
+ err = errors.Wrapf(copyErr, "error copying content for %q", hdr.Name)
+ break
+ }
+ if n != hdr.Size {
+ err = errors.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
+ break
+ }
+ }
+ }
+ hdr, err = tarReader.Next()
+ }
+ if err != io.EOF {
+ filterer.err = errors.Wrapf(err, "error reading tar archive")
+ break
+ }
+ filterer.closedLock.Lock()
+ closed = filterer.closed
+ filterer.closedLock.Unlock()
+ }
+ pipeReader.Close()
+ tarWriter.Close()
+ writeCloser.Close()
+ filterer.wg.Done()
+ }()
+ return filterer
+}
+
+// A tarDigester digests a tar archive, potentially calling a specified
+// function to modify each header before it is digested.
+type tarDigester struct {
+ isOpen bool
+ nested digester
+ tarFilterer io.WriteCloser
+}
+
+func newTarDigester(contentType string) digester {
+ nested := newSimpleDigester(contentType)
+ digester := &tarDigester{
+ isOpen: true,
+ nested: nested,
+ tarFilterer: nested,
+ }
+ return digester
+}
+
+func (t *tarDigester) ContentType() string {
+ return t.nested.ContentType()
+}
+
+func (t *tarDigester) Digest() digest.Digest {
+ return t.nested.Digest()
+}
+
+func (t *tarDigester) Write(p []byte) (int, error) {
+ return t.tarFilterer.Write(p)
+}
+
+func (t *tarDigester) Close() error {
+ if t.isOpen {
+ t.isOpen = false
+ return t.tarFilterer.Close()
+ }
+ return nil
}
// CompositeDigester can compute a digest over multiple items.
type CompositeDigester struct {
- digesters []singleDigester
+ digesters []digester
+ closer io.Closer
+}
+
+// closeOpenDigester closes an open sub-digester, if we have one.
+func (c *CompositeDigester) closeOpenDigester() {
+ if c.closer != nil {
+ c.closer.Close()
+ c.closer = nil
+ }
}
// Restart clears all state, so that the composite digester can start over.
func (c *CompositeDigester) Restart() {
+ c.closeOpenDigester()
c.digesters = nil
}
-// Start starts recording the digest for a new item. The caller should call
-// Hash() immediately after to retrieve the new io.Writer.
-func (c *CompositeDigester) Start(prefix string) {
- prefix = strings.TrimSuffix(prefix, ":")
- c.digesters = append(c.digesters, singleDigester{digester: digest.Canonical.Digester(), prefix: prefix})
+// Start starts recording the digest for a new item ("", "file", or "dir").
+// The caller should call Hash() immediately after to retrieve the new
+// io.WriteCloser.
+func (c *CompositeDigester) Start(contentType string) {
+ c.closeOpenDigester()
+ switch contentType {
+ case "":
+ c.digesters = append(c.digesters, newSimpleDigester(""))
+ case "file", "dir":
+ digester := newTarDigester(contentType)
+ c.closer = digester
+ c.digesters = append(c.digesters, digester)
+ default:
+ panic(fmt.Sprintf(`unrecognized content type: expected "", "file", or "dir", got %q`, contentType))
+ }
}
// Hash returns the hasher for the current item.
-func (c *CompositeDigester) Hash() hash.Hash {
+func (c *CompositeDigester) Hash() io.WriteCloser {
num := len(c.digesters)
if num == 0 {
return nil
}
- return c.digesters[num-1].digester.Hash()
+ return c.digesters[num-1]
}
-// Digest returns the prefix and a composite digest over everything that's been
-// digested.
+// Digest returns the content type and a composite digest over everything
+// that's been digested.
func (c *CompositeDigester) Digest() (string, digest.Digest) {
+ c.closeOpenDigester()
num := len(c.digesters)
switch num {
case 0:
return "", ""
case 1:
- return c.digesters[0].prefix, c.digesters[0].digester.Digest()
+ return c.digesters[0].ContentType(), c.digesters[0].Digest()
default:
content := ""
for i, digester := range c.digesters {
if i > 0 {
content += ","
}
- prefix := digester.prefix
- if digester.prefix != "" {
- digester.prefix += ":"
+ contentType := digester.ContentType()
+ if contentType != "" {
+ contentType += ":"
}
- content += prefix + digester.digester.Digest().Encoded()
+ content += contentType + digester.Digest().Encoded()
}
return "multi", digest.Canonical.FromString(content)
}
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 15834256c..c4d70e795 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -6,7 +6,7 @@ require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containers/common v0.15.2
github.com/containers/image/v5 v5.5.1
- github.com/containers/ocicrypt v1.0.2
+ github.com/containers/ocicrypt v1.0.3
github.com/containers/storage v1.20.2
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
@@ -17,14 +17,14 @@ require (
github.com/hashicorp/go-multierror v1.0.0
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/mattn/go-shellwords v1.0.10
- github.com/onsi/ginkgo v1.13.0
+ github.com/onsi/ginkgo v1.14.0
github.com/onsi/gomega v1.10.1
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.5.2
+ github.com/opencontainers/selinux v1.6.0
github.com/openshift/imagebuilder v1.1.6
github.com/pkg/errors v0.9.1
github.com/seccomp/containers-golang v0.5.0
@@ -38,6 +38,8 @@ require (
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/text v0.3.3 // indirect
+ k8s.io/klog v1.0.0 // indirect
)
replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 718d44909..1ea944af7 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -59,6 +59,8 @@ github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDpl
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
+github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
+github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -111,6 +113,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -221,6 +224,8 @@ github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y=
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
@@ -249,6 +254,8 @@ github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWO
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/openshift/imagebuilder v1.1.6 h1:1+YzRxIIefY4QqtCImx6rg+75QrKNfBoPAKxgMo/khM=
github.com/openshift/imagebuilder v1.1.6/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
@@ -338,6 +345,8 @@ github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJ
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -353,6 +362,8 @@ go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -418,6 +429,8 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -478,4 +491,6 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 12dca36b9..1fa276d01 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -132,7 +132,8 @@ type BuildOptions struct {
// when handling RUN instructions. If a capability appears in both lists, it
// will be dropped.
DropCapabilities []string
- CommonBuildOpts *buildah.CommonBuildOptions
+ // CommonBuildOpts is *required*.
+ CommonBuildOpts *buildah.CommonBuildOptions
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
DefaultMountsFilePath string
// IIDFile tells the builder to write the image ID to the specified file
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 93b25887f..943e2c8cc 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -251,8 +251,9 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
-func (b *Executor) startStage(stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
+func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
stageExec := &StageExecutor{
+ ctx: ctx,
executor: b,
index: stage.Position,
stages: stages,
@@ -289,17 +290,24 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
return imageRef, nil
}
-func (b *Executor) waitForStage(ctx context.Context, name string) error {
- stage := b.stages[name]
- if stage == nil {
- return errors.Errorf("unknown stage %q", name)
+// waitForStage waits for an entry to be added to terminatedStage indicating
+// that the specified stage has finished. If there is no stage defined by that
+// name, then it will return (false, nil). If there is a stage defined by that
+// name, it will return true along with any error it encounters.
+func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) {
+ found := false
+ for _, otherStage := range stages {
+ if otherStage.Name == name || fmt.Sprintf("%d", otherStage.Position) == name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false, nil
}
for {
if b.lastError != nil {
- return b.lastError
- }
- if stage.stage == nil {
- return nil
+ return true, b.lastError
}
b.stagesLock.Lock()
@@ -307,13 +315,13 @@ func (b *Executor) waitForStage(ctx context.Context, name string) error {
b.stagesLock.Unlock()
if terminated {
- return nil
+ return true, nil
}
b.stagesSemaphore.Release(1)
time.Sleep(time.Millisecond * 10)
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
- return errors.Wrapf(err, "error reacquiring job semaphore")
+ return true, errors.Wrapf(err, "error reacquiring job semaphore")
}
}
}
@@ -355,7 +363,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
}
b.stagesLock.Lock()
- stageExecutor := b.startStage(&stage, stages, output)
+ stageExecutor := b.startStage(ctx, &stage, stages, output)
b.stagesLock.Unlock()
// If this a single-layer build, or if it's a multi-layered
@@ -531,19 +539,19 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
go func() {
defer b.stagesSemaphore.Release(1)
defer wg.Done()
- imageID, ref, err = b.buildStage(ctx, cleanupStages, stages, index)
- if err != nil {
+ stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
+ if stageErr != nil {
ch <- Result{
Index: index,
- Error: err,
+ Error: stageErr,
}
return
}
ch <- Result{
Index: index,
- ImageID: imageID,
- Ref: ref,
+ ImageID: stageID,
+ Ref: stageRef,
Error: nil,
}
}()
@@ -559,6 +567,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
b.stagesLock.Lock()
b.terminatedStage[stage.Name] = struct{}{}
+ b.terminatedStage[fmt.Sprintf("%d", stage.Position)] = struct{}{}
b.stagesLock.Unlock()
if r.Error != nil {
@@ -569,7 +578,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// If this is an intermediate stage, make a note of the ID, so
// that we can look it up later.
if r.Index < len(stages)-1 && r.ImageID != "" {
+ b.stagesLock.Lock()
b.imageMap[stage.Name] = r.ImageID
+ b.stagesLock.Unlock()
// We're not populating the cache with intermediate
// images, so add this one to the list of images that
// we'll remove later.
@@ -579,6 +590,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
if r.Index == len(stages)-1 {
imageID = r.ImageID
+ ref = r.Ref
}
}
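
The executor.go hunks above change how stages are referenced: a stage named in `--from=` may now be matched either by its `AS` alias or by its numeric position, which is why `terminatedStage` is keyed by both. The following is a minimal sketch of that lookup, not the vendored code itself; the `stage` type and `matchesStage` helper are illustrative only.

```go
package main

import "fmt"

type stage struct {
	Name     string
	Position int
}

// matchesStage reports whether ref identifies st, either by its AS alias
// or by its zero-based position in the Dockerfile.
func matchesStage(st stage, ref string) bool {
	return st.Name == ref || fmt.Sprintf("%d", st.Position) == ref
}

func main() {
	builder := stage{Name: "builder", Position: 0}
	fmt.Println(matchesStage(builder, "builder")) // true
	fmt.Println(matchesStage(builder, "0"))       // true
	fmt.Println(matchesStage(builder, "1"))       // false
}
```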
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 9dc5c1b97..5b5828d01 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -44,6 +44,7 @@ import (
// If we're naming the result of the build, only the last stage will apply that
// name to the image that it produces.
type StageExecutor struct {
+ ctx context.Context
executor *Executor
index int
stages imagebuilder.Stages
@@ -262,7 +263,7 @@ func (s *StageExecutor) volumeCacheRestore() error {
// don't care about the details of where in the filesystem the content actually
// goes, because we're not actually going to add it here, so this is less
// involved than Copy().
-func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []string, envValues []string) (string, error) {
+func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser.Node, argValues []string, envValues []string) (string, error) {
// No instruction: done.
if node == nil {
return "", nil
@@ -295,6 +296,9 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []st
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
+ if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
+ return "", err
+ }
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
@@ -422,6 +426,9 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
var copyExcludes []string
contextDir := s.executor.contextDir
if len(copy.From) > 0 {
+ if isStage, err := s.executor.waitForStage(s.ctx, copy.From, s.stages[:s.index]); isStage && err != nil {
+ return err
+ }
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
@@ -638,9 +645,11 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
// Check and see if the image is a pseudonym for the end result of a
// previous stage, named by an AS clause in the Dockerfile.
+ s.executor.stagesLock.Lock()
if asImageFound, ok := s.executor.imageMap[from]; ok {
builderOptions.FromImage = asImageFound
}
+ s.executor.stagesLock.Unlock()
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
return nil, errors.Wrapf(err, "error creating build container")
@@ -763,16 +772,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// substitute that image's ID for the base image's name here. If not,
// then go on assuming that it's just a regular image that's either in
// local storage, or one that we have to pull from a registry.
- for _, previousStage := range s.stages[:s.index] {
- if previousStage.Name == base {
- if err := s.executor.waitForStage(ctx, previousStage.Name); err != nil {
- return "", nil, err
- }
- }
+ if isStage, err := s.executor.waitForStage(ctx, base, s.stages[:s.index]); isStage && err != nil {
+ return "", nil, err
}
+ s.executor.stagesLock.Lock()
if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
base = stageImage
}
+ s.executor.stagesLock.Unlock()
// Create the (first) working container for this stage. Reinitializing
// the imagebuilder configuration may alter the list of steps we have,
@@ -876,10 +883,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
+ // If the source's name corresponds to the
+ // result of an earlier stage, wait for that
+ // stage to finish being built.
+ if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
+ return "", nil, err
+ }
if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
- if err := s.executor.waitForStage(ctx, otherStage.name); err != nil {
- return "", nil, err
- }
mountPoint = otherStage.mountPoint
} else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
@@ -907,7 +917,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
+ addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
@@ -956,7 +966,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// cached images so far, look for one that matches what we
// expect to produce for this instruction.
if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
- addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
+ addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
@@ -1014,7 +1024,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
// In case we added content, retrieve its digest.
- addedContentDigest, err := s.digestSpecifiedContent(node, ib.Arguments(), ib.Config().Env)
+ addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
if err != nil {
return "", nil, err
}
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index c29ff1eb8..03c9b2720 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -221,7 +221,7 @@ as yum, dnf or apt-get on a number of Linux distributions.
Prior to installing Buildah, install the following packages on your Linux distro:
* make
-* golang (Requires version 1.12 or higher.)
+* golang (Requires version 1.13 or higher.)
* bats
* btrfs-progs-devel
* bzip2
@@ -332,7 +332,7 @@ In Ubuntu zesty and xenial, you can use these commands:
sudo apt-add-repository -y ppa:projectatomic/ppa
sudo apt-get -y -qq update
sudo apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
- sudo apt-get -y install golang-1.12
+ sudo apt-get -y install golang-1.13
```
Then to install Buildah on Ubuntu follow the steps in this example:
@@ -342,7 +342,7 @@ Then to install Buildah on Ubuntu follow the steps in this example:
export GOPATH=`pwd`
git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
cd ./src/github.com/containers/buildah
- PATH=/usr/lib/go-1.12/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
+ PATH=/usr/lib/go-1.13/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
sudo make install install.runc
buildah --help
```
@@ -433,7 +433,7 @@ cat /usr/share/containers/mounts.conf
`/usr/share/containers/seccomp.json`
-seccomp.json contains the whitelist of seccomp rules to be allowed inside of
+seccomp.json contains the list of seccomp rules to be allowed inside of
containers. This file is usually provided by the containers-common package.
The link above takes you to the seccomp.json
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
index b7f704615..3f0177226 100644
--- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -275,12 +275,11 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
}
if infos == nil {
- image, err := s.reference.NewImage(ctx, &s.sys)
+ img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest))
if err != nil {
return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference))
}
- defer image.Close()
- infos = image.LayerInfos()
+ infos = img.LayerInfos()
}
if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 4bb760683..656a7c654 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -342,6 +343,9 @@ func GetBindMount(args []string) (specs.Mount, error) {
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z":
newMount.Options = append(newMount.Options, kv[0])
case "bind-propagation":
@@ -367,6 +371,10 @@ func GetBindMount(args []string) (specs.Mount, error) {
}
newMount.Destination = kv[1]
setDest = true
+ case "consistency":
+ // Option for OS X only, has no meaning on other platforms
+ // and can thus be safely ignored.
+ // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, errors.Wrapf(errBadMntOption, kv[0])
}
@@ -403,6 +411,9 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
switch kv[0] {
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
@@ -907,6 +918,9 @@ func defaultIsolation() (buildah.Isolation, error) {
return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
}
}
+ if unshare.IsRootless() {
+ return buildah.IsolationOCIRootless, nil
+ }
return buildah.IsolationDefault, nil
}
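
The parse.go hunks above teach the mount-option parser two new cases: "readonly" is accepted as an alias for "ro", and the macOS-only "consistency" option is silently ignored. Here is a hypothetical, simplified sketch of that normalization; `normalizeMountOptions` is an illustrative helper, not buildah's actual parser.

```go
package main

import (
	"fmt"
	"strings"
)

func normalizeMountOptions(args []string) ([]string, error) {
	var opts []string
	for _, arg := range args {
		kv := strings.SplitN(arg, "=", 2)
		switch kv[0] {
		case "ro", "nosuid", "nodev", "noexec":
			opts = append(opts, kv[0])
		case "readonly":
			// Alias for "ro"
			opts = append(opts, "ro")
		case "consistency":
			// macOS-only option; accepted but ignored on other platforms
		default:
			return nil, fmt.Errorf("unrecognized mount option %q", kv[0])
		}
	}
	return opts, nil
}

func main() {
	opts, err := normalizeMountOptions([]string{"readonly", "nosuid", "consistency=cached"})
	fmt.Println(opts, err) // [ro nosuid] <nil>
}
```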
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index d419bb082..3af9049b7 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -394,7 +394,10 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
for _, specMount := range spec.Mounts {
// Override some of the mounts from the generated list if we're doing different things with namespaces.
if specMount.Destination == "/dev/shm" {
- specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize}
+ specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777"}
+ if shmSize != "" {
+ specMount.Options = append(specMount.Options, "size="+shmSize)
+ }
if hostIPC && !hostUser {
if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) {
logrus.Debugf("/dev/shm is not present, not binding into container")
@@ -841,13 +844,8 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
stopped := false
defer func() {
if !stopped {
- err2 := kill.Run()
- if err2 != nil {
- if err == nil {
- err = errors.Wrapf(err2, "error stopping container")
- } else {
- logrus.Infof("error stopping container: %v", err2)
- }
+ if err2 := kill.Run(); err2 != nil {
+ logrus.Infof("error stopping container: %v", err2)
}
}
}()
@@ -1779,6 +1777,8 @@ func setupMaskedPaths(g *generate.Generator) {
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware",
+ "/sys/fs/selinux",
+ "/sys/dev",
} {
g.AddLinuxMaskedPaths(mp)
}
@@ -1966,7 +1966,7 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
}
}
- for _, envSpec := range append(append(defaultEnv, b.Env()...), options.Env...) {
+ for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) {
env := strings.SplitN(envSpec, "=", 2)
if len(env) > 1 {
g.AddProcessEnv(env[0], env[1])
@@ -2023,13 +2023,10 @@ func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string
Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"},
},
}
- // Cover up /sys/fs/cgroup and /sys/fs/selinux, if they exist in our source for /sys.
+ // Cover up /sys/fs/cgroup, if it exists in our source for /sys.
if _, err := os.Stat("/sys/fs/cgroup"); err == nil {
spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
}
- if _, err := os.Stat("/sys/fs/selinux"); err == nil {
- spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
- }
// Keep anything that isn't under /dev, /proc, or /sys.
for i := range spec.Mounts {
if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") ||
@@ -2073,7 +2070,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation Isolation, options RunOptions
if cmd.Stderr == nil {
cmd.Stderr = os.Stderr
}
- cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel()))
+ cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
preader, pwriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating configuration pipe")
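
One of the run_linux.go changes above stops emitting an empty `size=` option for /dev/shm when no shm size was requested. A minimal sketch of the resulting option construction, assuming `shmSize` is the (possibly empty) size string taken from the build's resource settings:

```go
package main

import "fmt"

// shmMountOptions builds the /dev/shm tmpfs options, appending a size
// limit only when one was actually requested.
func shmMountOptions(shmSize string) []string {
	opts := []string{"nosuid", "noexec", "nodev", "mode=1777"}
	if shmSize != "" {
		opts = append(opts, "size="+shmSize)
	}
	return opts
}

func main() {
	fmt.Println(shmMountOptions(""))       // [nosuid noexec nodev mode=1777]
	fmt.Println(shmMountOptions("65536k")) // [nosuid noexec nodev mode=1777 size=65536k]
}
```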
diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md
index 4ff2f06c4..afd9c640a 100644
--- a/vendor/github.com/containers/buildah/troubleshooting.md
+++ b/vendor/github.com/containers/buildah/troubleshooting.md
@@ -76,7 +76,7 @@ the `buildah run` command will not complete and an error will be raised.
#### Solution
There are two solutions to this problem. The
-[`podman run`](https://github.com/containers/libpod/blob/master/docs/podman-run.1.md)
+[`podman run`](https://github.com/containers/podman/blob/master/docs/podman-run.1.md)
command can be used in place of `buildah run`. To still use `buildah run`, surround
the command with single quotes and use `bash -c`. The previous examples would be
changed to:
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index f41daa2cc..00efc8d21 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -469,3 +469,19 @@ func FindLocalRuntime(runtime string) string {
}
return localRuntime
}
+
+// MergeEnv merges two lists of environment variables, avoiding duplicates.
+func MergeEnv(defaults, overrides []string) []string {
+ s := make([]string, 0, len(defaults)+len(overrides))
+ index := make(map[string]int)
+ for _, envSpec := range append(defaults, overrides...) {
+ envVar := strings.SplitN(envSpec, "=", 2)
+ if i, ok := index[envVar[0]]; ok {
+ s[i] = envSpec
+ continue
+ }
+ s = append(s, envSpec)
+ index[envVar[0]] = len(s) - 1
+ }
+ return s
+}
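
A short usage sketch for the `MergeEnv` helper added above (assuming it is imported from buildah's util package exactly as defined in the diff): entries from `overrides` replace earlier entries with the same variable name, while first-seen ordering is preserved.

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/util"
)

func main() {
	defaults := []string{"PATH=/usr/bin", "HOME=/root"}
	overrides := []string{"PATH=/usr/local/bin:/usr/bin", "TERM=xterm"}
	merged := util.MergeEnv(defaults, overrides)
	fmt.Println(merged)
	// [PATH=/usr/local/bin:/usr/bin HOME=/root TERM=xterm]
}
```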
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
new file mode 100644
index 000000000..c20f900d8
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -0,0 +1,87 @@
+package retry
+
+import (
+ "context"
+ "math"
+ "net"
+ "net/url"
+ "syscall"
+ "time"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ errcodev2 "github.com/docker/distribution/registry/api/v2"
+ "github.com/hashicorp/go-multierror"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// RetryOptions defines the options for retrying an operation
+type RetryOptions struct {
+ MaxRetry int // The number of times to possibly retry
+}
+
+// RetryIfNecessary retries the operation with exponential backoff, as configured by retryOptions
+func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
+ err := operation()
+ for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
+ delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
+ logrus.Infof("Warning: failed, retrying in %s ... (%d/%d)", delay, attempt+1, retryOptions.MaxRetry)
+ select {
+ case <-time.After(delay):
+ break
+ case <-ctx.Done():
+ return err
+ }
+ err = operation()
+ }
+ return err
+}
+
+func isRetryable(err error) bool {
+ err = errors.Cause(err)
+
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ return false
+ }
+
+ type unwrapper interface {
+ Unwrap() error
+ }
+
+ switch e := err.(type) {
+
+ case errcode.Error:
+ switch e.Code {
+ case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
+ return false
+ }
+ return true
+ case *net.OpError:
+ return isRetryable(e.Err)
+ case *url.Error:
+ return isRetryable(e.Err)
+ case syscall.Errno:
+ return e != syscall.ECONNREFUSED
+ case errcode.Errors:
+ // if this error is a group of errors, process them all in turn
+ for i := range e {
+ if !isRetryable(e[i]) {
+ return false
+ }
+ }
+ return true
+ case *multierror.Error:
+ // if this error is a group of errors, process them all in turn
+ for i := range e.Errors {
+ if !isRetryable(e.Errors[i]) {
+ return false
+ }
+ }
+ return true
+ case unwrapper:
+ err = e.Unwrap()
+ return isRetryable(err)
+ }
+
+ return false
+}
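
A usage sketch for the new retry helper (the `RetryIfNecessary` function and `RetryOptions` type are shown in the diff above): the operation is re-run only for errors that `isRetryable` treats as transient, with delays of 1s, 2s, 4s, ... between attempts. The simulated failure below is an assumption for illustration.

```go
package main

import (
	"context"
	"fmt"
	"syscall"

	"github.com/containers/common/pkg/retry"
)

func main() {
	ctx := context.Background()
	attempts := 0
	// syscall.ETIMEDOUT is treated as retryable; ECONNREFUSED, context
	// cancellation, and registry "unauthorized" errors are not.
	err := retry.RetryIfNecessary(ctx, func() error {
		attempts++
		return syscall.ETIMEDOUT
	}, &retry.RetryOptions{MaxRetry: 2})
	fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts=3, err is the last failure
}
```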
diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml
new file mode 100644
index 000000000..a5fc8651c
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.travis.yml
@@ -0,0 +1,22 @@
+dist: xenial
+language: go
+
+os:
+- linux
+
+go:
+ - "1.13.x"
+
+matrix:
+ include:
+ - os: linux
+
+go_import_path: github.com/containers/ocicrypt
+
+install:
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.19.1
+
+script:
+ - make
+ - make check
+ - make test
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 000000000..30124c89f
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
index 002a526b5..5e6bc2a75 100644
--- a/vendor/github.com/containers/ocicrypt/go.mod
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -3,15 +3,13 @@ module github.com/containers/ocicrypt
go 1.12
require (
- github.com/containerd/containerd v1.2.10
- github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
+ github.com/davecgh/go-spew v1.1.1 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/pkg/errors v0.8.1
- github.com/sirupsen/logrus v1.4.2 // indirect
github.com/stretchr/testify v1.3.0 // indirect
+ go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
- google.golang.org/grpc v1.24.0 // indirect
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
gopkg.in/square/go-jose.v2 v2.3.1
- gotest.tools v2.2.0+incompatible // indirect
)
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
index 935d373d2..6b4e83d75 100644
--- a/vendor/github.com/containers/ocicrypt/go.sum
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -1,23 +1,7 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/containerd/containerd v1.2.10 h1:liQDhXqIn7y6cJ/7qBgOaZsiTZJc56/wkkhDBiDBRDw=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -26,22 +10,16 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -49,15 +27,5 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/p
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go
index 44cafae0c..c89f3b0ea 100644
--- a/vendor/github.com/containers/ocicrypt/gpg.go
+++ b/vendor/github.com/containers/ocicrypt/gpg.go
@@ -170,7 +170,7 @@ func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
- args = append([]string{"--homedir", gc.gpgHomeDir})
+ args = []string{"--homedir", gc.gpgHomeDir}
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))
@@ -229,7 +229,7 @@ func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
- args = append([]string{"--homedir", gc.gpgHomeDir})
+ args = []string{"--homedir", gc.gpgHomeDir}
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
index eeba64748..1feae462b 100644
--- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
@@ -23,8 +23,8 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
- "github.com/fullsailor/pkcs7"
"github.com/pkg/errors"
+ "go.mozilla.org/pkcs7"
)
type pkcs7KeyWrapper struct {
diff --git a/vendor/github.com/fullsailor/pkcs7/.travis.yml b/vendor/github.com/fullsailor/pkcs7/.travis.yml
deleted file mode 100644
index bc1204376..000000000
--- a/vendor/github.com/fullsailor/pkcs7/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-
-go:
- - 1.8
- - 1.9
- - "1.10"
- - tip
diff --git a/vendor/github.com/fullsailor/pkcs7/README.md b/vendor/github.com/fullsailor/pkcs7/README.md
deleted file mode 100644
index bfd948f32..000000000
--- a/vendor/github.com/fullsailor/pkcs7/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# pkcs7
-
-[![GoDoc](https://godoc.org/github.com/fullsailor/pkcs7?status.svg)](https://godoc.org/github.com/fullsailor/pkcs7)
-[![Build Status](https://travis-ci.org/fullsailor/pkcs7.svg?branch=master)](https://travis-ci.org/fullsailor/pkcs7)
-
-pkcs7 implements parsing and creating signed and enveloped messages.
-
-- Documentation on [GoDoc](http://godoc.org/github.com/fullsailor/pkcs7)
diff --git a/vendor/github.com/fullsailor/pkcs7/pkcs7.go b/vendor/github.com/fullsailor/pkcs7/pkcs7.go
deleted file mode 100644
index 0264466b4..000000000
--- a/vendor/github.com/fullsailor/pkcs7/pkcs7.go
+++ /dev/null
@@ -1,962 +0,0 @@
-// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
-package pkcs7
-
-import (
- "bytes"
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/hmac"
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
- "errors"
- "fmt"
- "math/big"
- "sort"
- "time"
-
- _ "crypto/sha1" // for crypto.SHA1
-)
-
-// PKCS7 Represents a PKCS7 structure
-type PKCS7 struct {
- Content []byte
- Certificates []*x509.Certificate
- CRLs []pkix.CertificateList
- Signers []signerInfo
- raw interface{}
-}
-
-type contentInfo struct {
- ContentType asn1.ObjectIdentifier
- Content asn1.RawValue `asn1:"explicit,optional,tag:0"`
-}
-
-// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
-// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
-// and Enveloped Data are supported (1.2.840.113549.1.7.3)
-var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
-
-type unsignedData []byte
-
-var (
- oidData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
- oidSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
- oidEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
- oidSignedAndEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 4}
- oidDigestedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 5}
- oidEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
- oidAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
- oidAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
- oidAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
-)
-
-type signedData struct {
- Version int `asn1:"default:1"`
- DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
- ContentInfo contentInfo
- Certificates rawCertificates `asn1:"optional,tag:0"`
- CRLs []pkix.CertificateList `asn1:"optional,tag:1"`
- SignerInfos []signerInfo `asn1:"set"`
-}
-
-type rawCertificates struct {
- Raw asn1.RawContent
-}
-
-type envelopedData struct {
- Version int
- RecipientInfos []recipientInfo `asn1:"set"`
- EncryptedContentInfo encryptedContentInfo
-}
-
-type recipientInfo struct {
- Version int
- IssuerAndSerialNumber issuerAndSerial
- KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
- EncryptedKey []byte
-}
-
-type encryptedContentInfo struct {
- ContentType asn1.ObjectIdentifier
- ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
- EncryptedContent asn1.RawValue `asn1:"tag:0,optional"`
-}
-
-type attribute struct {
- Type asn1.ObjectIdentifier
- Value asn1.RawValue `asn1:"set"`
-}
-
-type issuerAndSerial struct {
- IssuerName asn1.RawValue
- SerialNumber *big.Int
-}
-
-// MessageDigestMismatchError is returned when the signer data digest does not
-// match the computed digest for the contained content
-type MessageDigestMismatchError struct {
- ExpectedDigest []byte
- ActualDigest []byte
-}
-
-func (err *MessageDigestMismatchError) Error() string {
- return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
-}
-
-type signerInfo struct {
- Version int `asn1:"default:1"`
- IssuerAndSerialNumber issuerAndSerial
- DigestAlgorithm pkix.AlgorithmIdentifier
- AuthenticatedAttributes []attribute `asn1:"optional,tag:0"`
- DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
- EncryptedDigest []byte
- UnauthenticatedAttributes []attribute `asn1:"optional,tag:1"`
-}
-
-// Parse decodes a DER encoded PKCS7 package
-func Parse(data []byte) (p7 *PKCS7, err error) {
- if len(data) == 0 {
- return nil, errors.New("pkcs7: input data is empty")
- }
- var info contentInfo
- der, err := ber2der(data)
- if err != nil {
- return nil, err
- }
- rest, err := asn1.Unmarshal(der, &info)
- if len(rest) > 0 {
- err = asn1.SyntaxError{Msg: "trailing data"}
- return
- }
- if err != nil {
- return
- }
-
- // fmt.Printf("--> Content Type: %s", info.ContentType)
- switch {
- case info.ContentType.Equal(oidSignedData):
- return parseSignedData(info.Content.Bytes)
- case info.ContentType.Equal(oidEnvelopedData):
- return parseEnvelopedData(info.Content.Bytes)
- }
- return nil, ErrUnsupportedContentType
-}
-
-func parseSignedData(data []byte) (*PKCS7, error) {
- var sd signedData
- asn1.Unmarshal(data, &sd)
- certs, err := sd.Certificates.Parse()
- if err != nil {
- return nil, err
- }
- // fmt.Printf("--> Signed Data Version %d\n", sd.Version)
-
- var compound asn1.RawValue
- var content unsignedData
-
- // The Content.Bytes maybe empty on PKI responses.
- if len(sd.ContentInfo.Content.Bytes) > 0 {
- if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
- return nil, err
- }
- }
- // Compound octet string
- if compound.IsCompound {
- if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
- return nil, err
- }
- } else {
- // assuming this is tag 04
- content = compound.Bytes
- }
- return &PKCS7{
- Content: content,
- Certificates: certs,
- CRLs: sd.CRLs,
- Signers: sd.SignerInfos,
- raw: sd}, nil
-}
-
-func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
- if len(raw.Raw) == 0 {
- return nil, nil
- }
-
- var val asn1.RawValue
- if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
- return nil, err
- }
-
- return x509.ParseCertificates(val.Bytes)
-}
-
-func parseEnvelopedData(data []byte) (*PKCS7, error) {
- var ed envelopedData
- if _, err := asn1.Unmarshal(data, &ed); err != nil {
- return nil, err
- }
- return &PKCS7{
- raw: ed,
- }, nil
-}
-
-// Verify checks the signatures of a PKCS7 object
-// WARNING: Verify does not check signing time or verify certificate chains at
-// this time.
-func (p7 *PKCS7) Verify() (err error) {
- if len(p7.Signers) == 0 {
- return errors.New("pkcs7: Message has no signers")
- }
- for _, signer := range p7.Signers {
- if err := verifySignature(p7, signer); err != nil {
- return err
- }
- }
- return nil
-}
-
-func verifySignature(p7 *PKCS7, signer signerInfo) error {
- signedData := p7.Content
- hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
- if err != nil {
- return err
- }
- if len(signer.AuthenticatedAttributes) > 0 {
- // TODO(fullsailor): First check the content type match
- var digest []byte
- err := unmarshalAttribute(signer.AuthenticatedAttributes, oidAttributeMessageDigest, &digest)
- if err != nil {
- return err
- }
- h := hash.New()
- h.Write(p7.Content)
- computed := h.Sum(nil)
- if !hmac.Equal(digest, computed) {
- return &MessageDigestMismatchError{
- ExpectedDigest: digest,
- ActualDigest: computed,
- }
- }
- // TODO(fullsailor): Optionally verify certificate chain
- // TODO(fullsailor): Optionally verify signingTime against certificate NotAfter/NotBefore
- signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
- if err != nil {
- return err
- }
- }
- cert := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
- if cert == nil {
- return errors.New("pkcs7: No certificate for signer")
- }
-
- algo := getSignatureAlgorithmFromAI(signer.DigestEncryptionAlgorithm)
- if algo == x509.UnknownSignatureAlgorithm {
- // I'm not sure what the spec here is, and the openssl sources were not
- // helpful. But, this is what App Store receipts appear to do.
- // The DigestEncryptionAlgorithm is just "rsaEncryption (PKCS #1)"
- // But we're expecting a digest + encryption algorithm. So... we're going
- // to determine an algorithm based on the DigestAlgorithm and this
- // encryption algorithm.
- if signer.DigestEncryptionAlgorithm.Algorithm.Equal(oidEncryptionAlgorithmRSA) {
- algo = getRSASignatureAlgorithmForDigestAlgorithm(hash)
- }
- }
- return cert.CheckSignature(algo, signedData, signer.EncryptedDigest)
-}
-
-func marshalAttributes(attrs []attribute) ([]byte, error) {
- encodedAttributes, err := asn1.Marshal(struct {
- A []attribute `asn1:"set"`
- }{A: attrs})
- if err != nil {
- return nil, err
- }
-
- // Remove the leading sequence octets
- var raw asn1.RawValue
- asn1.Unmarshal(encodedAttributes, &raw)
- return raw.Bytes, nil
-}
-
-var (
- oidDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
- oidEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
-)
-
-func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
- for _, cert := range certs {
- if isCertMatchForIssuerAndSerial(cert, ias) {
- return cert
- }
- }
- return nil
-}
-
-func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
- switch {
- case oid.Equal(oidDigestAlgorithmSHA1):
- return crypto.SHA1, nil
- case oid.Equal(oidSHA256):
- return crypto.SHA256, nil
- }
- return crypto.Hash(0), ErrUnsupportedAlgorithm
-}
-
-func getRSASignatureAlgorithmForDigestAlgorithm(hash crypto.Hash) x509.SignatureAlgorithm {
- for _, details := range signatureAlgorithmDetails {
- if details.pubKeyAlgo == x509.RSA && details.hash == hash {
- return details.algo
- }
- }
- return x509.UnknownSignatureAlgorithm
-}
-
-// GetOnlySigner returns an x509.Certificate for the first signer of the signed
-// data payload. If there are more or less than one signer, nil is returned
-func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
- if len(p7.Signers) != 1 {
- return nil
- }
- signer := p7.Signers[0]
- return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
-}
-
-// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
-var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
-
-// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
-var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
-
-// Decrypt decrypts encrypted content info for recipient cert and private key
-func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte, error) {
- data, ok := p7.raw.(envelopedData)
- if !ok {
- return nil, ErrNotEncryptedContent
- }
- recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
- if recipient.EncryptedKey == nil {
- return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
- }
- if priv := pk.(*rsa.PrivateKey); priv != nil {
- var contentKey []byte
- contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, priv, recipient.EncryptedKey)
- if err != nil {
- return nil, err
- }
- return data.EncryptedContentInfo.decrypt(contentKey)
- }
- fmt.Printf("Unsupported Private Key: %v\n", pk)
- return nil, ErrUnsupportedAlgorithm
-}
-
-var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
-var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
-var oidEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
-var oidEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
-var oidEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
-
-func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
- alg := eci.ContentEncryptionAlgorithm.Algorithm
- if !alg.Equal(oidEncryptionAlgorithmDESCBC) &&
- !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) &&
- !alg.Equal(oidEncryptionAlgorithmAES256CBC) &&
- !alg.Equal(oidEncryptionAlgorithmAES128CBC) &&
- !alg.Equal(oidEncryptionAlgorithmAES128GCM) {
- fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
- return nil, ErrUnsupportedAlgorithm
- }
-
- // EncryptedContent can either be constructed of multple OCTET STRINGs
- // or _be_ a tagged OCTET STRING
- var cyphertext []byte
- if eci.EncryptedContent.IsCompound {
- // Complex case to concat all of the children OCTET STRINGs
- var buf bytes.Buffer
- cypherbytes := eci.EncryptedContent.Bytes
- for {
- var part []byte
- cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
- buf.Write(part)
- if cypherbytes == nil {
- break
- }
- }
- cyphertext = buf.Bytes()
- } else {
- // Simple case, the bytes _are_ the cyphertext
- cyphertext = eci.EncryptedContent.Bytes
- }
-
- var block cipher.Block
- var err error
-
- switch {
- case alg.Equal(oidEncryptionAlgorithmDESCBC):
- block, err = des.NewCipher(key)
- case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
- block, err = des.NewTripleDESCipher(key)
- case alg.Equal(oidEncryptionAlgorithmAES256CBC):
- fallthrough
- case alg.Equal(oidEncryptionAlgorithmAES128GCM), alg.Equal(oidEncryptionAlgorithmAES128CBC):
- block, err = aes.NewCipher(key)
- }
-
- if err != nil {
- return nil, err
- }
-
- if alg.Equal(oidEncryptionAlgorithmAES128GCM) {
- params := aesGCMParameters{}
- paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
-
- _, err := asn1.Unmarshal(paramBytes, &params)
- if err != nil {
- return nil, err
- }
-
- gcm, err := cipher.NewGCM(block)
- if err != nil {
- return nil, err
- }
-
- if len(params.Nonce) != gcm.NonceSize() {
- return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
- }
- if params.ICVLen != gcm.Overhead() {
- return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
- }
-
- plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
- if err != nil {
- return nil, err
- }
-
- return plaintext, nil
- }
-
- iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
- if len(iv) != block.BlockSize() {
- return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
- }
- mode := cipher.NewCBCDecrypter(block, iv)
- plaintext := make([]byte, len(cyphertext))
- mode.CryptBlocks(plaintext, cyphertext)
- if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
- return nil, err
- }
- return plaintext, nil
-}
-
-func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
- for _, recp := range recipients {
- if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
- return recp
- }
- }
- return recipientInfo{}
-}
-
-func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
- return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Compare(cert.RawIssuer, ias.IssuerName.FullBytes) == 0
-}
-
-func pad(data []byte, blocklen int) ([]byte, error) {
- if blocklen < 1 {
- return nil, fmt.Errorf("invalid blocklen %d", blocklen)
- }
- padlen := blocklen - (len(data) % blocklen)
- if padlen == 0 {
- padlen = blocklen
- }
- pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
- return append(data, pad...), nil
-}
-
-func unpad(data []byte, blocklen int) ([]byte, error) {
- if blocklen < 1 {
- return nil, fmt.Errorf("invalid blocklen %d", blocklen)
- }
- if len(data)%blocklen != 0 || len(data) == 0 {
- return nil, fmt.Errorf("invalid data len %d", len(data))
- }
-
- // the last byte is the length of padding
- padlen := int(data[len(data)-1])
-
- // check padding integrity, all bytes should be the same
- pad := data[len(data)-padlen:]
- for _, padbyte := range pad {
- if padbyte != byte(padlen) {
- return nil, errors.New("invalid padding")
- }
- }
-
- return data[:len(data)-padlen], nil
-}
-
-func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error {
- for _, attr := range attrs {
- if attr.Type.Equal(attributeType) {
- _, err := asn1.Unmarshal(attr.Value.Bytes, out)
- return err
- }
- }
- return errors.New("pkcs7: attribute type not in attributes")
-}
-
-// UnmarshalSignedAttribute decodes a single attribute from the signer info
-func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
- sd, ok := p7.raw.(signedData)
- if !ok {
- return errors.New("pkcs7: payload is not signedData content")
- }
- if len(sd.SignerInfos) < 1 {
- return errors.New("pkcs7: payload has no signers")
- }
- attributes := sd.SignerInfos[0].AuthenticatedAttributes
- return unmarshalAttribute(attributes, attributeType, out)
-}
-
-// SignedData is an opaque data structure for creating signed data payloads
-type SignedData struct {
- sd signedData
- certs []*x509.Certificate
- messageDigest []byte
-}
-
-// Attribute represents a key value pair attribute. Value must be marshalable byte
-// `encoding/asn1`
-type Attribute struct {
- Type asn1.ObjectIdentifier
- Value interface{}
-}
-
-// SignerInfoConfig are optional values to include when adding a signer
-type SignerInfoConfig struct {
- ExtraSignedAttributes []Attribute
-}
-
-// NewSignedData initializes a SignedData with content
-func NewSignedData(data []byte) (*SignedData, error) {
- content, err := asn1.Marshal(data)
- if err != nil {
- return nil, err
- }
- ci := contentInfo{
- ContentType: oidData,
- Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
- }
- digAlg := pkix.AlgorithmIdentifier{
- Algorithm: oidDigestAlgorithmSHA1,
- }
- h := crypto.SHA1.New()
- h.Write(data)
- md := h.Sum(nil)
- sd := signedData{
- ContentInfo: ci,
- Version: 1,
- DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{digAlg},
- }
- return &SignedData{sd: sd, messageDigest: md}, nil
-}
-
-type attributes struct {
- types []asn1.ObjectIdentifier
- values []interface{}
-}
-
-// Add adds the attribute, maintaining insertion order
-func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
- attrs.types = append(attrs.types, attrType)
- attrs.values = append(attrs.values, value)
-}
-
-type sortableAttribute struct {
- SortKey []byte
- Attribute attribute
-}
-
-type attributeSet []sortableAttribute
-
-func (sa attributeSet) Len() int {
- return len(sa)
-}
-
-func (sa attributeSet) Less(i, j int) bool {
- return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
-}
-
-func (sa attributeSet) Swap(i, j int) {
- sa[i], sa[j] = sa[j], sa[i]
-}
-
-func (sa attributeSet) Attributes() []attribute {
- attrs := make([]attribute, len(sa))
- for i, attr := range sa {
- attrs[i] = attr.Attribute
- }
- return attrs
-}
-
-func (attrs *attributes) ForMarshaling() ([]attribute, error) {
- sortables := make(attributeSet, len(attrs.types))
- for i := range sortables {
- attrType := attrs.types[i]
- attrValue := attrs.values[i]
- asn1Value, err := asn1.Marshal(attrValue)
- if err != nil {
- return nil, err
- }
- attr := attribute{
- Type: attrType,
- Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
- }
- encoded, err := asn1.Marshal(attr)
- if err != nil {
- return nil, err
- }
- sortables[i] = sortableAttribute{
- SortKey: encoded,
- Attribute: attr,
- }
- }
- sort.Sort(sortables)
- return sortables.Attributes(), nil
-}
-
-// AddSigner signs attributes about the content and adds certificate to payload
-func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
- attrs := &attributes{}
- attrs.Add(oidAttributeContentType, sd.sd.ContentInfo.ContentType)
- attrs.Add(oidAttributeMessageDigest, sd.messageDigest)
- attrs.Add(oidAttributeSigningTime, time.Now())
- for _, attr := range config.ExtraSignedAttributes {
- attrs.Add(attr.Type, attr.Value)
- }
- finalAttrs, err := attrs.ForMarshaling()
- if err != nil {
- return err
- }
- signature, err := signAttributes(finalAttrs, pkey, crypto.SHA1)
- if err != nil {
- return err
- }
-
- ias, err := cert2issuerAndSerial(cert)
- if err != nil {
- return err
- }
-
- signer := signerInfo{
- AuthenticatedAttributes: finalAttrs,
- DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1},
- DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA1WithRSA},
- IssuerAndSerialNumber: ias,
- EncryptedDigest: signature,
- Version: 1,
- }
- // create signature of signed attributes
- sd.certs = append(sd.certs, cert)
- sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
- return nil
-}
-
-// AddCertificate adds the certificate to the payload. Useful for parent certificates
-func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
- sd.certs = append(sd.certs, cert)
-}
-
-// Detach removes content from the signed data struct to make it a detached signature.
-// This must be called right before Finish()
-func (sd *SignedData) Detach() {
- sd.sd.ContentInfo = contentInfo{ContentType: oidData}
-}
-
-// Finish marshals the content and its signers
-func (sd *SignedData) Finish() ([]byte, error) {
- sd.sd.Certificates = marshalCertificates(sd.certs)
- inner, err := asn1.Marshal(sd.sd)
- if err != nil {
- return nil, err
- }
- outer := contentInfo{
- ContentType: oidSignedData,
- Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
- }
- return asn1.Marshal(outer)
-}
-
-func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
- var ias issuerAndSerial
- // The issuer RDNSequence has to match exactly the sequence in the certificate
- // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
- ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
- ias.SerialNumber = cert.SerialNumber
-
- return ias, nil
-}
-
-// signs the DER encoded form of the attributes with the private key
-func signAttributes(attrs []attribute, pkey crypto.PrivateKey, hash crypto.Hash) ([]byte, error) {
- attrBytes, err := marshalAttributes(attrs)
- if err != nil {
- return nil, err
- }
- h := hash.New()
- h.Write(attrBytes)
- hashed := h.Sum(nil)
- switch priv := pkey.(type) {
- case *rsa.PrivateKey:
- return rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA1, hashed)
- }
- return nil, ErrUnsupportedAlgorithm
-}
-
-// concats and wraps the certificates in the RawValue structure
-func marshalCertificates(certs []*x509.Certificate) rawCertificates {
- var buf bytes.Buffer
- for _, cert := range certs {
- buf.Write(cert.Raw)
- }
- rawCerts, _ := marshalCertificateBytes(buf.Bytes())
- return rawCerts
-}
-
-// Even though, the tag & length are stripped out during marshalling the
-// RawContent, we have to encode it into the RawContent. If its missing,
-// then `asn1.Marshal()` will strip out the certificate wrapper instead.
-func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
- var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
- b, err := asn1.Marshal(val)
- if err != nil {
- return rawCertificates{}, err
- }
- return rawCertificates{Raw: b}, nil
-}
-
-// DegenerateCertificate creates a signed data structure containing only the
-// provided certificate or certificate chain.
-func DegenerateCertificate(cert []byte) ([]byte, error) {
- rawCert, err := marshalCertificateBytes(cert)
- if err != nil {
- return nil, err
- }
- emptyContent := contentInfo{ContentType: oidData}
- sd := signedData{
- Version: 1,
- ContentInfo: emptyContent,
- Certificates: rawCert,
- CRLs: []pkix.CertificateList{},
- }
- content, err := asn1.Marshal(sd)
- if err != nil {
- return nil, err
- }
- signedContent := contentInfo{
- ContentType: oidSignedData,
- Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
- }
- return asn1.Marshal(signedContent)
-}
-
-const (
- EncryptionAlgorithmDESCBC = iota
- EncryptionAlgorithmAES128GCM
-)
-
-// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
-// plaintext message. Change the value of this variable to change which
-// algorithm is used in the Encrypt() function.
-var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
-
-// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
-// content with an unsupported algorithm.
-var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC and AES-128-GCM supported")
-
-const nonceSize = 12
-
-type aesGCMParameters struct {
- Nonce []byte `asn1:"tag:4"`
- ICVLen int
-}
-
-func encryptAES128GCM(content []byte) ([]byte, *encryptedContentInfo, error) {
- // Create AES key and nonce
- key := make([]byte, 16)
- nonce := make([]byte, nonceSize)
-
- _, err := rand.Read(key)
- if err != nil {
- return nil, nil, err
- }
-
- _, err = rand.Read(nonce)
- if err != nil {
- return nil, nil, err
- }
-
- // Encrypt content
- block, err := aes.NewCipher(key)
- if err != nil {
- return nil, nil, err
- }
-
- gcm, err := cipher.NewGCM(block)
- if err != nil {
- return nil, nil, err
- }
-
- ciphertext := gcm.Seal(nil, nonce, content, nil)
-
- // Prepare ASN.1 Encrypted Content Info
- paramSeq := aesGCMParameters{
- Nonce: nonce,
- ICVLen: gcm.Overhead(),
- }
-
- paramBytes, err := asn1.Marshal(paramSeq)
- if err != nil {
- return nil, nil, err
- }
-
- eci := encryptedContentInfo{
- ContentType: oidData,
- ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
- Algorithm: oidEncryptionAlgorithmAES128GCM,
- Parameters: asn1.RawValue{
- Tag: asn1.TagSequence,
- Bytes: paramBytes,
- },
- },
- EncryptedContent: marshalEncryptedContent(ciphertext),
- }
-
- return key, &eci, nil
-}
-
-func encryptDESCBC(content []byte) ([]byte, *encryptedContentInfo, error) {
- // Create DES key & CBC IV
- key := make([]byte, 8)
- iv := make([]byte, des.BlockSize)
- _, err := rand.Read(key)
- if err != nil {
- return nil, nil, err
- }
- _, err = rand.Read(iv)
- if err != nil {
- return nil, nil, err
- }
-
- // Encrypt padded content
- block, err := des.NewCipher(key)
- if err != nil {
- return nil, nil, err
- }
- mode := cipher.NewCBCEncrypter(block, iv)
- plaintext, err := pad(content, mode.BlockSize())
- cyphertext := make([]byte, len(plaintext))
- mode.CryptBlocks(cyphertext, plaintext)
-
- // Prepare ASN.1 Encrypted Content Info
- eci := encryptedContentInfo{
- ContentType: oidData,
- ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
- Algorithm: oidEncryptionAlgorithmDESCBC,
- Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
- },
- EncryptedContent: marshalEncryptedContent(cyphertext),
- }
-
- return key, &eci, nil
-}
-
-// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
-// recipient keys for each recipient public key.
-//
-// The algorithm used to perform encryption is determined by the current value
-// of the global ContentEncryptionAlgorithm package variable. By default, the
-// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
-// value before calling Encrypt(). For example:
-//
-// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
-//
-// TODO(fullsailor): Add support for encrypting content with other algorithms
-func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
- var eci *encryptedContentInfo
- var key []byte
- var err error
-
- // Apply chosen symmetric encryption method
- switch ContentEncryptionAlgorithm {
- case EncryptionAlgorithmDESCBC:
- key, eci, err = encryptDESCBC(content)
-
- case EncryptionAlgorithmAES128GCM:
- key, eci, err = encryptAES128GCM(content)
-
- default:
- return nil, ErrUnsupportedEncryptionAlgorithm
- }
-
- if err != nil {
- return nil, err
- }
-
- // Prepare each recipient's encrypted cipher key
- recipientInfos := make([]recipientInfo, len(recipients))
- for i, recipient := range recipients {
- encrypted, err := encryptKey(key, recipient)
- if err != nil {
- return nil, err
- }
- ias, err := cert2issuerAndSerial(recipient)
- if err != nil {
- return nil, err
- }
- info := recipientInfo{
- Version: 0,
- IssuerAndSerialNumber: ias,
- KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
- Algorithm: oidEncryptionAlgorithmRSA,
- },
- EncryptedKey: encrypted,
- }
- recipientInfos[i] = info
- }
-
- // Prepare envelope content
- envelope := envelopedData{
- EncryptedContentInfo: *eci,
- Version: 0,
- RecipientInfos: recipientInfos,
- }
- innerContent, err := asn1.Marshal(envelope)
- if err != nil {
- return nil, err
- }
-
- // Prepare outer payload structure
- wrapper := contentInfo{
- ContentType: oidEnvelopedData,
- Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
- }
-
- return asn1.Marshal(wrapper)
-}
-
-func marshalEncryptedContent(content []byte) asn1.RawValue {
- asn1Content, _ := asn1.Marshal(content)
- return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
-}
-
-func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
- if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil {
- return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
- }
- return nil, ErrUnsupportedAlgorithm
-}
diff --git a/vendor/github.com/fullsailor/pkcs7/x509.go b/vendor/github.com/fullsailor/pkcs7/x509.go
deleted file mode 100644
index 195fd0e4b..000000000
--- a/vendor/github.com/fullsailor/pkcs7/x509.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the go/golang LICENSE file.
-
-package pkcs7
-
-// These are private constants and functions from the crypto/x509 package that
-// are useful when dealing with signatures verified by x509 certificates
-
-import (
- "bytes"
- "crypto"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
-)
-
-var (
- oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
- oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
- oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
- oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
- oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
- oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
- oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
- oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
- oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
- oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
- oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
- oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
- oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
-
- oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
- oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
- oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
-
- oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
-
- // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
- // but it's specified by ISO. Microsoft's makecert.exe has been known
- // to produce certificates with this OID.
- oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
-)
-
-var signatureAlgorithmDetails = []struct {
- algo x509.SignatureAlgorithm
- name string
- oid asn1.ObjectIdentifier
- pubKeyAlgo x509.PublicKeyAlgorithm
- hash crypto.Hash
-}{
- {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
- {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
- {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
- {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
- {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
- {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
- {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
- {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256},
- {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384},
- {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512},
- {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
- {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
- {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
- {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
- {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
- {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
-}
-
-// pssParameters reflects the parameters in an AlgorithmIdentifier that
-// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3
-type pssParameters struct {
- // The following three fields are not marked as
- // optional because the default values specify SHA-1,
- // which is no longer suitable for use in signatures.
- Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
- MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
- SaltLength int `asn1:"explicit,tag:2"`
- TrailerField int `asn1:"optional,explicit,tag:3,default:1"`
-}
-
-// asn1.NullBytes is not available prior to Go 1.9
-var nullBytes = []byte{5, 0}
-
-func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) x509.SignatureAlgorithm {
- if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
- for _, details := range signatureAlgorithmDetails {
- if ai.Algorithm.Equal(details.oid) {
- return details.algo
- }
- }
- return x509.UnknownSignatureAlgorithm
- }
-
- // RSA PSS is special because it encodes important parameters
- // in the Parameters.
-
- var params pssParameters
- if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
- return x509.UnknownSignatureAlgorithm
- }
-
- var mgf1HashFunc pkix.AlgorithmIdentifier
- if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
- return x509.UnknownSignatureAlgorithm
- }
-
- // PSS is greatly overburdened with options. This code forces
- // them into three buckets by requiring that the MGF1 hash
- // function always match the message hash function (as
- // recommended in
- // https://tools.ietf.org/html/rfc3447#section-8.1), that the
- // salt length matches the hash length, and that the trailer
- // field has the default value.
- if !bytes.Equal(params.Hash.Parameters.FullBytes, nullBytes) ||
- !params.MGF.Algorithm.Equal(oidMGF1) ||
- !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
- !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, nullBytes) ||
- params.TrailerField != 1 {
- return x509.UnknownSignatureAlgorithm
- }
-
- switch {
- case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
- return x509.SHA256WithRSAPSS
- case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
- return x509.SHA384WithRSAPSS
- case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
- return x509.SHA512WithRSAPSS
- }
-
- return x509.UnknownSignatureAlgorithm
-}
diff --git a/vendor/github.com/fullsailor/pkcs7/.gitignore b/vendor/go.mozilla.org/pkcs7/.gitignore
index daf913b1b..daf913b1b 100644
--- a/vendor/github.com/fullsailor/pkcs7/.gitignore
+++ b/vendor/go.mozilla.org/pkcs7/.gitignore
diff --git a/vendor/go.mozilla.org/pkcs7/.travis.yml b/vendor/go.mozilla.org/pkcs7/.travis.yml
new file mode 100644
index 000000000..eac4c1762
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - "1.11"
+ - "1.12"
+ - "1.13"
+ - tip
+before_install:
+ - make gettools
+script:
+ - make
diff --git a/vendor/github.com/fullsailor/pkcs7/LICENSE b/vendor/go.mozilla.org/pkcs7/LICENSE
index 75f320908..75f320908 100644
--- a/vendor/github.com/fullsailor/pkcs7/LICENSE
+++ b/vendor/go.mozilla.org/pkcs7/LICENSE
diff --git a/vendor/go.mozilla.org/pkcs7/Makefile b/vendor/go.mozilla.org/pkcs7/Makefile
new file mode 100644
index 000000000..47c73b868
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/Makefile
@@ -0,0 +1,20 @@
+all: vet staticcheck test
+
+test:
+ go test -covermode=count -coverprofile=coverage.out .
+
+showcoverage: test
+ go tool cover -html=coverage.out
+
+vet:
+ go vet .
+
+lint:
+ golint .
+
+staticcheck:
+ staticcheck .
+
+gettools:
+ go get -u honnef.co/go/tools/...
+ go get -u golang.org/x/lint/golint
diff --git a/vendor/go.mozilla.org/pkcs7/README.md b/vendor/go.mozilla.org/pkcs7/README.md
new file mode 100644
index 000000000..bf37059c5
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/README.md
@@ -0,0 +1,69 @@
+# pkcs7
+
+[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7)
+[![Build Status](https://travis-ci.org/mozilla-services/pkcs7.svg?branch=master)](https://travis-ci.org/mozilla-services/pkcs7)
+
+pkcs7 implements parsing and creating signed and enveloped messages.
+
+```go
+package main
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "os"
+
+ "go.mozilla.org/pkcs7"
+)
+
+func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) {
+ toBeSigned, err := NewSignedData(content)
+ if err != nil {
+ err = fmt.Errorf("Cannot initialize signed data: %s", err)
+ return
+ }
+ if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil {
+ err = fmt.Errorf("Cannot add signer: %s", err)
+ return
+ }
+
+ // Detach signature, omit if you want an embedded signature
+ toBeSigned.Detach()
+
+ signed, err = toBeSigned.Finish()
+ if err != nil {
+ err = fmt.Errorf("Cannot finish signing data: %s", err)
+ return
+ }
+
+ // Verify the signature
+ pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed})
+ p7, err := pkcs7.Parse(signed)
+ if err != nil {
+ err = fmt.Errorf("Cannot parse our signed data: %s", err)
+ return
+ }
+
+ // since the signature was detached, reattach the content here
+ p7.Content = content
+
+ if bytes.Compare(content, p7.Content) != 0 {
+ err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content)
+ return
+ }
+ if err = p7.Verify(); err != nil {
+ err = fmt.Errorf("Cannot verify our signed data: %s", err)
+ return
+ }
+
+ return signed, nil
+}
+```
+
+
+
+## Credits
+This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7)
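
The README example above covers signing only; the vendored copy also ships enveloped-data support (`encrypt.go`/`decrypt.go` below). As a hedged sketch of that side of the API, the following self-contained program envelopes a message for one recipient and opens it again; the throwaway self-signed certificate exists only to keep the sketch runnable, real callers would load their own certificate and key.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	"go.mozilla.org/pkcs7"
)

func main() {
	// Throwaway key and self-signed certificate so the sketch is runnable;
	// real callers would load these from disk.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "pkcs7 example"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	// Envelope the message for the certificate (DES-CBC by default), then
	// parse and decrypt it again with the matching private key.
	enveloped, err := pkcs7.Encrypt([]byte("hello"), []*x509.Certificate{cert})
	if err != nil {
		panic(err)
	}
	p7, err := pkcs7.Parse(enveloped)
	if err != nil {
		panic(err)
	}
	plaintext, err := p7.Decrypt(cert, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```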
diff --git a/vendor/github.com/fullsailor/pkcs7/ber.go b/vendor/go.mozilla.org/pkcs7/ber.go
index 89e96d30c..585256739 100644
--- a/vendor/github.com/fullsailor/pkcs7/ber.go
+++ b/vendor/go.mozilla.org/pkcs7/ber.go
@@ -5,7 +5,7 @@ import (
"errors"
)
-// var encodeIndent = 0
+var encodeIndent = 0
type asn1Object interface {
EncodeTo(writer *bytes.Buffer) error
@@ -18,7 +18,7 @@ type asn1Structured struct {
func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
//fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
- //encodeIndent++
+ encodeIndent++
inner := new(bytes.Buffer)
for _, obj := range s.content {
err := obj.EncodeTo(inner)
@@ -26,7 +26,7 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
return err
}
}
- //encodeIndent--
+ encodeIndent--
out.Write(s.tagBytes)
encodeLength(out, inner.Len())
out.Write(inner.Bytes())
@@ -133,35 +133,49 @@ func encodeLength(out *bytes.Buffer, length int) (err error) {
}
func readObject(ber []byte, offset int) (asn1Object, int, error) {
- //fmt.Printf("\n====> Starting readObject at offset: %d\n\n", offset)
+ berLen := len(ber)
+ if offset >= berLen {
+ return nil, 0, errors.New("ber2der: offset is after end of ber data")
+ }
tagStart := offset
b := ber[offset]
offset++
+ if offset >= berLen {
+ return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+ }
tag := b & 0x1F // last 5 bits
if tag == 0x1F {
tag = 0
for ber[offset] >= 0x80 {
tag = tag*128 + ber[offset] - 0x80
offset++
+ if offset > berLen {
+ return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+ }
}
- tag = tag*128 + ber[offset] - 0x80
+ // jvehent 20170227: this doesn't appear to be used anywhere...
+ //tag = tag*128 + ber[offset] - 0x80
offset++
+ if offset > berLen {
+ return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+ }
}
tagEnd := offset
kind := b & 0x20
- /*
- if kind == 0 {
- fmt.Print("--> Primitive\n")
- } else {
- fmt.Print("--> Constructed\n")
- }
- */
+ if kind == 0 {
+ debugprint("--> Primitive\n")
+ } else {
+ debugprint("--> Constructed\n")
+ }
// read length
var length int
l := ber[offset]
offset++
- indefinite := false
+ if offset > berLen {
+ return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+ }
+ hack := 0
if l > 0x80 {
numberOfBytes := (int)(l & 0x7F)
if numberOfBytes > 4 { // int is only guaranteed to be 32bit
@@ -170,33 +184,42 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
return nil, 0, errors.New("ber2der: BER tag length is negative")
}
- if 0x0 == (int)(ber[offset]) {
+ if (int)(ber[offset]) == 0x0 {
return nil, 0, errors.New("ber2der: BER tag length has leading zero")
}
- //fmt.Printf("--> (compute length) indicator byte: %x\n", l)
- //fmt.Printf("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
+ debugprint("--> (compute length) indicator byte: %x\n", l)
+ debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
for i := 0; i < numberOfBytes; i++ {
length = length*256 + (int)(ber[offset])
offset++
+ if offset > berLen {
+ return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+ }
}
} else if l == 0x80 {
- indefinite = true
+ // find length by searching content
+ markerIndex := bytes.LastIndex(ber[offset:], []byte{0x0, 0x0})
+ if markerIndex == -1 {
+ return nil, 0, errors.New("ber2der: Invalid BER format")
+ }
+ length = markerIndex
+ hack = 2
+ debugprint("--> (compute length) marker found at offset: %d\n", markerIndex+offset)
} else {
length = (int)(l)
}
-
+ if length < 0 {
+ return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length")
+ }
//fmt.Printf("--> length : %d\n", length)
contentEnd := offset + length
if contentEnd > len(ber) {
return nil, 0, errors.New("ber2der: BER tag length is more than available data")
}
- //fmt.Printf("--> content start : %d\n", offset)
- //fmt.Printf("--> content end : %d\n", contentEnd)
- //fmt.Printf("--> content : % X\n", ber[offset:contentEnd])
+ debugprint("--> content start : %d\n", offset)
+ debugprint("--> content end : %d\n", contentEnd)
+ debugprint("--> content : % X\n", ber[offset:contentEnd])
var obj asn1Object
- if indefinite && kind == 0 {
- return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
- }
if kind == 0 {
obj = asn1Primitive{
tagBytes: ber[tagStart:tagEnd],
@@ -205,25 +228,14 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
}
} else {
var subObjects []asn1Object
- for (offset < contentEnd) || indefinite {
+ for offset < contentEnd {
var subObj asn1Object
var err error
- subObj, offset, err = readObject(ber, offset)
+ subObj, offset, err = readObject(ber[:contentEnd], offset)
if err != nil {
return nil, 0, err
}
subObjects = append(subObjects, subObj)
-
- if indefinite {
- terminated, err := isIndefiniteTermination(ber, offset)
- if err != nil {
- return nil, 0, err
- }
-
- if terminated {
- break
- }
- }
}
obj = asn1Structured{
tagBytes: ber[tagStart:tagEnd],
@@ -231,18 +243,9 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
}
}
- // Apply indefinite form length with 0x0000 terminator.
- if indefinite {
- contentEnd = offset + 2
- }
-
- return obj, contentEnd, nil
+ return obj, contentEnd + hack, nil
}
-func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
- if len(ber) - offset < 2 {
- return false, errors.New("ber2der: Invalid BER format")
- }
-
- return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
+func debugprint(format string, a ...interface{}) {
+ //fmt.Printf(format, a)
}
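
For context on the bounds checks added above: a BER long-form length octet greater than 0x80 uses its low seven bits to say how many length bytes follow, and `readObject` accumulates those bytes big-endian. A small standalone sketch of that computation (not part of the vendored code; the helper name is made up):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeBERLength mirrors the long-form length handling in readObject: a
// short-form octet is the length itself, 0x80 signals the indefinite form,
// and anything above 0x80 counts the big-endian length bytes that follow.
func decodeBERLength(ber []byte, offset int) (length, next int, err error) {
	if offset >= len(ber) {
		return 0, 0, errors.New("offset is after end of ber data")
	}
	l := ber[offset]
	offset++
	switch {
	case l < 0x80:
		return int(l), offset, nil
	case l == 0x80:
		return 0, 0, errors.New("indefinite form not handled in this sketch")
	default:
		numberOfBytes := int(l & 0x7F)
		if numberOfBytes > 4 || offset+numberOfBytes > len(ber) {
			return 0, 0, errors.New("length bytes out of range")
		}
		for i := 0; i < numberOfBytes; i++ {
			length = length*256 + int(ber[offset])
			offset++
		}
		return length, offset, nil
	}
}

func main() {
	// 0x82 0x01 0x00 is the long form for a length of 256.
	fmt.Println(decodeBERLength([]byte{0x82, 0x01, 0x00}, 0)) // 256 3 <nil>
}
```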
diff --git a/vendor/go.mozilla.org/pkcs7/decrypt.go b/vendor/go.mozilla.org/pkcs7/decrypt.go
new file mode 100644
index 000000000..0d088d628
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/decrypt.go
@@ -0,0 +1,177 @@
+package pkcs7
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+)
+
+// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
+var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
+
+// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
+var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
+
+// Decrypt decrypts encrypted content info for recipient cert and private key
+func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) {
+ data, ok := p7.raw.(envelopedData)
+ if !ok {
+ return nil, ErrNotEncryptedContent
+ }
+ recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
+ if recipient.EncryptedKey == nil {
+ return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
+ }
+ switch pkey := pkey.(type) {
+ case *rsa.PrivateKey:
+ var contentKey []byte
+ contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return data.EncryptedContentInfo.decrypt(contentKey)
+ }
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// DecryptUsingPSK decrypts encrypted data using caller provided
+// pre-shared secret
+func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) {
+ data, ok := p7.raw.(encryptedData)
+ if !ok {
+ return nil, ErrNotEncryptedContent
+ }
+ return data.EncryptedContentInfo.decrypt(key)
+}
+
+func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
+ alg := eci.ContentEncryptionAlgorithm.Algorithm
+ if !alg.Equal(OIDEncryptionAlgorithmDESCBC) &&
+ !alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) &&
+ !alg.Equal(OIDEncryptionAlgorithmAES256CBC) &&
+ !alg.Equal(OIDEncryptionAlgorithmAES128CBC) &&
+ !alg.Equal(OIDEncryptionAlgorithmAES128GCM) &&
+ !alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+ fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ // EncryptedContent can either be constructed of multiple OCTET STRINGs
+ // or _be_ a tagged OCTET STRING
+ var cyphertext []byte
+ if eci.EncryptedContent.IsCompound {
+ // Complex case to concat all of the children OCTET STRINGs
+ var buf bytes.Buffer
+ cypherbytes := eci.EncryptedContent.Bytes
+ for {
+ var part []byte
+ cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
+ buf.Write(part)
+ if cypherbytes == nil {
+ break
+ }
+ }
+ cyphertext = buf.Bytes()
+ } else {
+ // Simple case, the bytes _are_ the cyphertext
+ cyphertext = eci.EncryptedContent.Bytes
+ }
+
+ var block cipher.Block
+ var err error
+
+ switch {
+ case alg.Equal(OIDEncryptionAlgorithmDESCBC):
+ block, err = des.NewCipher(key)
+ case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC):
+ block, err = des.NewTripleDESCipher(key)
+ case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM):
+ fallthrough
+ case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC):
+ block, err = aes.NewCipher(key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+ params := aesGCMParameters{}
+ paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+
+ _, err := asn1.Unmarshal(paramBytes, &params)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(params.Nonce) != gcm.NonceSize() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+ }
+ if params.ICVLen != gcm.Overhead() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+ }
+
+ plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return plaintext, nil
+ }
+
+ iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+ if len(iv) != block.BlockSize() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
+ }
+ mode := cipher.NewCBCDecrypter(block, iv)
+ plaintext := make([]byte, len(cyphertext))
+ mode.CryptBlocks(plaintext, cyphertext)
+ if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
+ return nil, err
+ }
+ return plaintext, nil
+}
+
+func unpad(data []byte, blocklen int) ([]byte, error) {
+ if blocklen < 1 {
+ return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+ }
+ if len(data)%blocklen != 0 || len(data) == 0 {
+ return nil, fmt.Errorf("invalid data len %d", len(data))
+ }
+
+ // the last byte is the length of padding
+ padlen := int(data[len(data)-1])
+
+ // check padding integrity, all bytes should be the same
+ pad := data[len(data)-padlen:]
+ for _, padbyte := range pad {
+ if padbyte != byte(padlen) {
+ return nil, errors.New("invalid padding")
+ }
+ }
+
+ return data[:len(data)-padlen], nil
+}
+
+func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
+ for _, recp := range recipients {
+ if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
+ return recp
+ }
+ }
+ return recipientInfo{}
+}
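
`DecryptUsingPSK` above pairs with `EncryptUsingPSK` from `encrypt.go` (next hunk). A minimal pre-shared-key round trip might look like the sketch below; note that the global `ContentEncryptionAlgorithm` has to be switched away from the DES-CBC default so the 16-byte key matches AES-128-GCM:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"go.mozilla.org/pkcs7"
)

func main() {
	// 16-byte pre-shared key for AES-128-GCM.
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	// The package default is DES-CBC; select AES-128-GCM before encrypting.
	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM

	encrypted, err := pkcs7.EncryptUsingPSK([]byte("hello"), key)
	if err != nil {
		panic(err)
	}
	p7, err := pkcs7.Parse(encrypted)
	if err != nil {
		panic(err)
	}
	plaintext, err := p7.DecryptUsingPSK(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```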
diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/go.mozilla.org/pkcs7/encrypt.go
new file mode 100644
index 000000000..da57ae643
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/encrypt.go
@@ -0,0 +1,399 @@
+package pkcs7
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+)
+
+type envelopedData struct {
+ Version int
+ RecipientInfos []recipientInfo `asn1:"set"`
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedData struct {
+ Version int
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type recipientInfo struct {
+ Version int
+ IssuerAndSerialNumber issuerAndSerial
+ KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedKey []byte
+}
+
+type encryptedContentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent asn1.RawValue `asn1:"tag:0,optional,explicit"`
+}
+
+const (
+ // EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm
+ EncryptionAlgorithmDESCBC = iota
+
+ // EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm
+ // Avoid this algorithm unless required for interoperability; use AES GCM instead.
+ EncryptionAlgorithmAES128CBC
+
+ // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm
+ // Avoid this algorithm unless required for interoperability; use AES GCM instead.
+ EncryptionAlgorithmAES256CBC
+
+ // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm
+ EncryptionAlgorithmAES128GCM
+
+ // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm
+ EncryptionAlgorithmAES256GCM
+)
+
+// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
+// plaintext message. Change the value of this variable to change which
+// algorithm is used in the Encrypt() function.
+var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
+
+// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
+// content with an unsupported algorithm.
+var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported")
+
+// ErrPSKNotProvided is returned when attempting to encrypt
+// using a PSK without actually providing the PSK.
+var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided")
+
+const nonceSize = 12
+
+type aesGCMParameters struct {
+ Nonce []byte `asn1:"tag:4"`
+ ICVLen int
+}
+
+func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+ var keyLen int
+ var algID asn1.ObjectIdentifier
+ switch ContentEncryptionAlgorithm {
+ case EncryptionAlgorithmAES128GCM:
+ keyLen = 16
+ algID = OIDEncryptionAlgorithmAES128GCM
+ case EncryptionAlgorithmAES256GCM:
+ keyLen = 32
+ algID = OIDEncryptionAlgorithmAES256GCM
+ default:
+ return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm)
+ }
+ if key == nil {
+ // Create AES key
+ key = make([]byte, keyLen)
+
+ _, err := rand.Read(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Create nonce
+ nonce := make([]byte, nonceSize)
+
+ _, err := rand.Read(nonce)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Encrypt content
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ciphertext := gcm.Seal(nil, nonce, content, nil)
+
+ // Prepare ASN.1 Encrypted Content Info
+ paramSeq := aesGCMParameters{
+ Nonce: nonce,
+ ICVLen: gcm.Overhead(),
+ }
+
+ paramBytes, err := asn1.Marshal(paramSeq)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ eci := encryptedContentInfo{
+ ContentType: OIDData,
+ ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: algID,
+ Parameters: asn1.RawValue{
+ Tag: asn1.TagSequence,
+ Bytes: paramBytes,
+ },
+ },
+ EncryptedContent: marshalEncryptedContent(ciphertext),
+ }
+
+ return key, &eci, nil
+}
+
+func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+ if key == nil {
+ // Create DES key
+ key = make([]byte, 8)
+
+ _, err := rand.Read(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Create CBC IV
+ iv := make([]byte, des.BlockSize)
+ _, err := rand.Read(iv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Encrypt padded content
+ block, err := des.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ mode := cipher.NewCBCEncrypter(block, iv)
+ plaintext, err := pad(content, mode.BlockSize())
+ if err != nil {
+ return nil, nil, err
+ }
+ cyphertext := make([]byte, len(plaintext))
+ mode.CryptBlocks(cyphertext, plaintext)
+
+ // Prepare ASN.1 Encrypted Content Info
+ eci := encryptedContentInfo{
+ ContentType: OIDData,
+ ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: OIDEncryptionAlgorithmDESCBC,
+ Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
+ },
+ EncryptedContent: marshalEncryptedContent(cyphertext),
+ }
+
+ return key, &eci, nil
+}
+
+func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+ var keyLen int
+ var algID asn1.ObjectIdentifier
+ switch ContentEncryptionAlgorithm {
+ case EncryptionAlgorithmAES128CBC:
+ keyLen = 16
+ algID = OIDEncryptionAlgorithmAES128CBC
+ case EncryptionAlgorithmAES256CBC:
+ keyLen = 32
+ algID = OIDEncryptionAlgorithmAES256CBC
+ default:
+ return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm)
+ }
+
+ if key == nil {
+ // Create AES key
+ key = make([]byte, keyLen)
+
+ _, err := rand.Read(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Create CBC IV
+ iv := make([]byte, aes.BlockSize)
+ _, err := rand.Read(iv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Encrypt padded content
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ mode := cipher.NewCBCEncrypter(block, iv)
+ plaintext, err := pad(content, mode.BlockSize())
+ if err != nil {
+ return nil, nil, err
+ }
+ cyphertext := make([]byte, len(plaintext))
+ mode.CryptBlocks(cyphertext, plaintext)
+
+ // Prepare ASN.1 Encrypted Content Info
+ eci := encryptedContentInfo{
+ ContentType: OIDData,
+ ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: algID,
+ Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
+ },
+ EncryptedContent: marshalEncryptedContent(cyphertext),
+ }
+
+ return key, &eci, nil
+}
+
+// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
+// recipient keys for each recipient public key.
+//
+// The algorithm used to perform encryption is determined by the current value
+// of the global ContentEncryptionAlgorithm package variable. By default, the
+// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
+// value before calling Encrypt(). For example:
+//
+// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
+//
+// TODO(fullsailor): Add support for encrypting content with other algorithms
+func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
+ var eci *encryptedContentInfo
+ var key []byte
+ var err error
+
+ // Apply chosen symmetric encryption method
+ switch ContentEncryptionAlgorithm {
+ case EncryptionAlgorithmDESCBC:
+ key, eci, err = encryptDESCBC(content, nil)
+ case EncryptionAlgorithmAES128CBC:
+ fallthrough
+ case EncryptionAlgorithmAES256CBC:
+ key, eci, err = encryptAESCBC(content, nil)
+ case EncryptionAlgorithmAES128GCM:
+ fallthrough
+ case EncryptionAlgorithmAES256GCM:
+ key, eci, err = encryptAESGCM(content, nil)
+
+ default:
+ return nil, ErrUnsupportedEncryptionAlgorithm
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare each recipient's encrypted cipher key
+ recipientInfos := make([]recipientInfo, len(recipients))
+ for i, recipient := range recipients {
+ encrypted, err := encryptKey(key, recipient)
+ if err != nil {
+ return nil, err
+ }
+ ias, err := cert2issuerAndSerial(recipient)
+ if err != nil {
+ return nil, err
+ }
+ info := recipientInfo{
+ Version: 0,
+ IssuerAndSerialNumber: ias,
+ KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: OIDEncryptionAlgorithmRSA,
+ },
+ EncryptedKey: encrypted,
+ }
+ recipientInfos[i] = info
+ }
+
+ // Prepare envelope content
+ envelope := envelopedData{
+ EncryptedContentInfo: *eci,
+ Version: 0,
+ RecipientInfos: recipientInfos,
+ }
+ innerContent, err := asn1.Marshal(envelope)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare outer payload structure
+ wrapper := contentInfo{
+ ContentType: OIDEnvelopedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+ }
+
+ return asn1.Marshal(wrapper)
+}
+
+// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure,
+// encrypted using caller provided pre-shared secret.
+func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) {
+ var eci *encryptedContentInfo
+ var err error
+
+ if key == nil {
+ return nil, ErrPSKNotProvided
+ }
+
+ // Apply chosen symmetric encryption method
+ switch ContentEncryptionAlgorithm {
+ case EncryptionAlgorithmDESCBC:
+ _, eci, err = encryptDESCBC(content, key)
+
+ case EncryptionAlgorithmAES128GCM:
+ fallthrough
+ case EncryptionAlgorithmAES256GCM:
+ _, eci, err = encryptAESGCM(content, key)
+
+ default:
+ return nil, ErrUnsupportedEncryptionAlgorithm
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare encrypted-data content
+ ed := encryptedData{
+ Version: 0,
+ EncryptedContentInfo: *eci,
+ }
+ innerContent, err := asn1.Marshal(ed)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare outer payload structure
+ wrapper := contentInfo{
+ ContentType: OIDEncryptedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+ }
+
+ return asn1.Marshal(wrapper)
+}
+
+func marshalEncryptedContent(content []byte) asn1.RawValue {
+ asn1Content, _ := asn1.Marshal(content)
+ return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
+}
+
+func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
+ if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil {
+ return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
+ }
+ return nil, ErrUnsupportedAlgorithm
+}
+
+func pad(data []byte, blocklen int) ([]byte, error) {
+ if blocklen < 1 {
+ return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+ }
+ padlen := blocklen - (len(data) % blocklen)
+ if padlen == 0 {
+ padlen = blocklen
+ }
+ pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
+ return append(data, pad...), nil
+}
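
The `pad` helper above (and its counterpart `unpad` in `decrypt.go`) implements PKCS#7-style block padding: every pad byte carries the pad length, and input that is already block-aligned gains a full extra block so the padding stays unambiguous. A tiny standalone illustration (the function name is made up for the sketch):

```go
package main

import (
	"bytes"
	"fmt"
)

// pkcs7Pad mirrors the vendored pad helper: append padlen bytes, each equal
// to padlen, where padlen runs from 1 up to a full block.
func pkcs7Pad(data []byte, blocklen int) []byte {
	padlen := blocklen - len(data)%blocklen // 1..blocklen, never zero
	return append(data, bytes.Repeat([]byte{byte(padlen)}, padlen)...)
}

func main() {
	fmt.Printf("% X\n", pkcs7Pad([]byte("ABCDE"), 8))    // 41 42 43 44 45 03 03 03
	fmt.Printf("% X\n", pkcs7Pad([]byte("ABCDEFGH"), 8)) // eight bytes of 08 appended
}
```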
diff --git a/vendor/go.mozilla.org/pkcs7/go.mod b/vendor/go.mozilla.org/pkcs7/go.mod
new file mode 100644
index 000000000..10ddff325
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/go.mod
@@ -0,0 +1,3 @@
+module go.mozilla.org/pkcs7
+
+go 1.11
diff --git a/vendor/go.mozilla.org/pkcs7/pkcs7.go b/vendor/go.mozilla.org/pkcs7/pkcs7.go
new file mode 100644
index 000000000..ccc6cc6df
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/pkcs7.go
@@ -0,0 +1,291 @@
+// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
+package pkcs7
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "sort"
+
+ _ "crypto/sha1" // for crypto.SHA1
+)
+
+// PKCS7 Represents a PKCS7 structure
+type PKCS7 struct {
+ Content []byte
+ Certificates []*x509.Certificate
+ CRLs []pkix.CertificateList
+ Signers []signerInfo
+ raw interface{}
+}
+
+type contentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"explicit,optional,tag:0"`
+}
+
+// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
+// and Enveloped Data are supported (1.2.840.113549.1.7.3)
+var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
+
+type unsignedData []byte
+
+var (
+ // Signed Data OIDs
+ OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
+ OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
+ OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
+ OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
+ OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
+ OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
+ OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
+
+ // Digest Algorithms
+ OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+ OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+ OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+ OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+ OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+ OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+
+ OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+
+ // Signature Algorithms
+ OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+ OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+
+ OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+ OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+ OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+
+ // Encryption Algorithms
+ OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
+ OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
+ OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+ OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
+ OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
+ OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46}
+)
+
+func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
+ switch {
+ case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1),
+ oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1),
+ oid.Equal(OIDEncryptionAlgorithmRSA):
+ return crypto.SHA1, nil
+ case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256):
+ return crypto.SHA256, nil
+ case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384):
+ return crypto.SHA384, nil
+ case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512):
+ return crypto.SHA512, nil
+ }
+ return crypto.Hash(0), ErrUnsupportedAlgorithm
+}
+
+// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm
+// and returns the corresponding OID digest algorithm
+func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) {
+ switch digestAlg {
+ case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
+ return OIDDigestAlgorithmSHA1, nil
+ case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
+ return OIDDigestAlgorithmSHA256, nil
+ case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
+ return OIDDigestAlgorithmSHA384, nil
+ case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
+ return OIDDigestAlgorithmSHA512, nil
+ }
+ return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm")
+}
+
+// getOIDForEncryptionAlgorithm takes the private key type of the signer and
+// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm
+func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) {
+ switch pkey.(type) {
+ case *rsa.PrivateKey:
+ switch {
+ default:
+ return OIDEncryptionAlgorithmRSA, nil
+ case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA):
+ return OIDEncryptionAlgorithmRSA, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
+ return OIDEncryptionAlgorithmRSASHA1, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
+ return OIDEncryptionAlgorithmRSASHA256, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
+ return OIDEncryptionAlgorithmRSASHA384, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
+ return OIDEncryptionAlgorithmRSASHA512, nil
+ }
+ case *ecdsa.PrivateKey:
+ switch {
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
+ return OIDDigestAlgorithmECDSASHA1, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
+ return OIDDigestAlgorithmECDSASHA256, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
+ return OIDDigestAlgorithmECDSASHA384, nil
+ case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
+ return OIDDigestAlgorithmECDSASHA512, nil
+ }
+ case *dsa.PrivateKey:
+ return OIDDigestAlgorithmDSA, nil
+ }
+ return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey)
+
+}
+
+// Parse decodes a DER encoded PKCS7 package
+func Parse(data []byte) (p7 *PKCS7, err error) {
+ if len(data) == 0 {
+ return nil, errors.New("pkcs7: input data is empty")
+ }
+ var info contentInfo
+ der, err := ber2der(data)
+ if err != nil {
+ return nil, err
+ }
+ rest, err := asn1.Unmarshal(der, &info)
+ if len(rest) > 0 {
+ err = asn1.SyntaxError{Msg: "trailing data"}
+ return
+ }
+ if err != nil {
+ return
+ }
+
+ // fmt.Printf("--> Content Type: %s", info.ContentType)
+ switch {
+ case info.ContentType.Equal(OIDSignedData):
+ return parseSignedData(info.Content.Bytes)
+ case info.ContentType.Equal(OIDEnvelopedData):
+ return parseEnvelopedData(info.Content.Bytes)
+ case info.ContentType.Equal(OIDEncryptedData):
+ return parseEncryptedData(info.Content.Bytes)
+ }
+ return nil, ErrUnsupportedContentType
+}
+
+func parseEnvelopedData(data []byte) (*PKCS7, error) {
+ var ed envelopedData
+ if _, err := asn1.Unmarshal(data, &ed); err != nil {
+ return nil, err
+ }
+ return &PKCS7{
+ raw: ed,
+ }, nil
+}
+
+func parseEncryptedData(data []byte) (*PKCS7, error) {
+ var ed encryptedData
+ if _, err := asn1.Unmarshal(data, &ed); err != nil {
+ return nil, err
+ }
+ return &PKCS7{
+ raw: ed,
+ }, nil
+}
+
+func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
+ if len(raw.Raw) == 0 {
+ return nil, nil
+ }
+
+ var val asn1.RawValue
+ if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificates(val.Bytes)
+}
+
+func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
+ return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
+}
+
+// Attribute represents a key value pair attribute. Value must be marshalable by
+// `encoding/asn1`
+type Attribute struct {
+ Type asn1.ObjectIdentifier
+ Value interface{}
+}
+
+type attributes struct {
+ types []asn1.ObjectIdentifier
+ values []interface{}
+}
+
+// Add adds the attribute, maintaining insertion order
+func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
+ attrs.types = append(attrs.types, attrType)
+ attrs.values = append(attrs.values, value)
+}
+
+type sortableAttribute struct {
+ SortKey []byte
+ Attribute attribute
+}
+
+type attributeSet []sortableAttribute
+
+func (sa attributeSet) Len() int {
+ return len(sa)
+}
+
+func (sa attributeSet) Less(i, j int) bool {
+ return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
+}
+
+func (sa attributeSet) Swap(i, j int) {
+ sa[i], sa[j] = sa[j], sa[i]
+}
+
+func (sa attributeSet) Attributes() []attribute {
+ attrs := make([]attribute, len(sa))
+ for i, attr := range sa {
+ attrs[i] = attr.Attribute
+ }
+ return attrs
+}
+
+func (attrs *attributes) ForMarshalling() ([]attribute, error) {
+ sortables := make(attributeSet, len(attrs.types))
+ for i := range sortables {
+ attrType := attrs.types[i]
+ attrValue := attrs.values[i]
+ asn1Value, err := asn1.Marshal(attrValue)
+ if err != nil {
+ return nil, err
+ }
+ attr := attribute{
+ Type: attrType,
+ Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
+ }
+ encoded, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, err
+ }
+ sortables[i] = sortableAttribute{
+ SortKey: encoded,
+ Attribute: attr,
+ }
+ }
+ sort.Sort(sortables)
+ return sortables.Attributes(), nil
+}
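
Compared with the deleted fullsailor copy, which hard-coded SHA-1, the digest OIDs above are exported and `sign.go` (next hunk) adds `SetDigestAlgorithm`. Following the README's convention of assuming the certificate and key are loaded elsewhere, a hedged sketch of signing with SHA-256 might read:

```go
package example

import (
	"crypto/rsa"
	"crypto/x509"
	"fmt"

	"go.mozilla.org/pkcs7"
)

// SignSHA256 is a sketch only: cert and key are assumed to be loaded by the
// caller, e.g. from PEM files.
func SignSHA256(content []byte, cert *x509.Certificate, key *rsa.PrivateKey) ([]byte, error) {
	toBeSigned, err := pkcs7.NewSignedData(content)
	if err != nil {
		return nil, fmt.Errorf("cannot initialize signed data: %s", err)
	}
	// The default digest is SHA-1; switch it before adding any signer.
	toBeSigned.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256)
	if err := toBeSigned.AddSigner(cert, key, pkcs7.SignerInfoConfig{}); err != nil {
		return nil, fmt.Errorf("cannot add signer: %s", err)
	}
	return toBeSigned.Finish()
}
```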
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/go.mozilla.org/pkcs7/sign.go
new file mode 100644
index 000000000..addd76383
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/sign.go
@@ -0,0 +1,429 @@
+package pkcs7
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+)
+
+// SignedData is an opaque data structure for creating signed data payloads
+type SignedData struct {
+ sd signedData
+ certs []*x509.Certificate
+ data, messageDigest []byte
+ digestOid asn1.ObjectIdentifier
+ encryptionOid asn1.ObjectIdentifier
+}
+
+// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
+// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default
+// and can be changed by calling SetDigestAlgorithm.
+func NewSignedData(data []byte) (*SignedData, error) {
+ content, err := asn1.Marshal(data)
+ if err != nil {
+ return nil, err
+ }
+ ci := contentInfo{
+ ContentType: OIDData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+ }
+ sd := signedData{
+ ContentInfo: ci,
+ Version: 1,
+ }
+ return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil
+}
+
+// SignerInfoConfig are optional values to include when adding a signer
+type SignerInfoConfig struct {
+ ExtraSignedAttributes []Attribute
+ ExtraUnsignedAttributes []Attribute
+}
+
+type signedData struct {
+ Version int `asn1:"default:1"`
+ DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
+ ContentInfo contentInfo
+ Certificates rawCertificates `asn1:"optional,tag:0"`
+ CRLs []pkix.CertificateList `asn1:"optional,tag:1"`
+ SignerInfos []signerInfo `asn1:"set"`
+}
+
+type signerInfo struct {
+ Version int `asn1:"default:1"`
+ IssuerAndSerialNumber issuerAndSerial
+ DigestAlgorithm pkix.AlgorithmIdentifier
+ AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"`
+ DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedDigest []byte
+ UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"`
+}
+
+type attribute struct {
+ Type asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"set"`
+}
+
+func marshalAttributes(attrs []attribute) ([]byte, error) {
+ encodedAttributes, err := asn1.Marshal(struct {
+ A []attribute `asn1:"set"`
+ }{A: attrs})
+ if err != nil {
+ return nil, err
+ }
+
+ // Remove the leading sequence octets
+ var raw asn1.RawValue
+ asn1.Unmarshal(encodedAttributes, &raw)
+ return raw.Bytes, nil
+}
+
+type rawCertificates struct {
+ Raw asn1.RawContent
+}
+
+type issuerAndSerial struct {
+ IssuerName asn1.RawValue
+ SerialNumber *big.Int
+}
+
+// SetDigestAlgorithm sets the digest algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
+ sd.digestOid = d
+}
+
+// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
+ sd.encryptionOid = d
+}
+
+// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent.
+func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+ var parents []*x509.Certificate
+ return sd.AddSignerChain(ee, pkey, parents, config)
+}
+
+// AddSignerChain signs attributes about the content and adds certificates
+// and signers infos to the Signed Data. The certificate and private key
+// of the end-entity signer are used to issue the signature, and any
+// parents of that end-entity that need to be added to the list of
+// certificates can be specified in the parents slice.
+//
+// The signature algorithm used to hash the data is the one of the end-entity
+// certificate.
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
+// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+// section of the SignedData.SignerInfo, alongside the serial number of
+// the end-entity.
+ var ias issuerAndSerial
+ ias.SerialNumber = ee.SerialNumber
+ if len(parents) == 0 {
+ // no parent, the issuer is the end-entity cert itself
+ ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+ } else {
+ err := verifyPartialChain(ee, parents)
+ if err != nil {
+ return err
+ }
+ // the first parent is the issuer
+ ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject}
+ }
+ sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers,
+ pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+ )
+ hash, err := getHashForOID(sd.digestOid)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
+ h.Write(sd.data)
+ sd.messageDigest = h.Sum(nil)
+ encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+ if err != nil {
+ return err
+ }
+ attrs := &attributes{}
+ attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType)
+ attrs.Add(OIDAttributeMessageDigest, sd.messageDigest)
+ attrs.Add(OIDAttributeSigningTime, time.Now().UTC())
+ for _, attr := range config.ExtraSignedAttributes {
+ attrs.Add(attr.Type, attr.Value)
+ }
+ finalAttrs, err := attrs.ForMarshalling()
+ if err != nil {
+ return err
+ }
+ unsignedAttrs := &attributes{}
+ for _, attr := range config.ExtraUnsignedAttributes {
+ unsignedAttrs.Add(attr.Type, attr.Value)
+ }
+ finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+ if err != nil {
+ return err
+ }
+ // create signature of signed attributes
+ signature, err := signAttributes(finalAttrs, pkey, hash)
+ if err != nil {
+ return err
+ }
+ signer := signerInfo{
+ AuthenticatedAttributes: finalAttrs,
+ UnauthenticatedAttributes: finalUnsignedAttrs,
+ DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+ DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid},
+ IssuerAndSerialNumber: ias,
+ EncryptedDigest: signature,
+ Version: 1,
+ }
+ sd.certs = append(sd.certs, ee)
+ if len(parents) > 0 {
+ sd.certs = append(sd.certs, parents...)
+ }
+ sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+ return nil
+}
+
+// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData.
+// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone
+// and does not include any signed attributes like timestamp and so on.
+//
+// This function is needed to sign old Android APKs, something you probably
+// shouldn't do unless you're maintaining backward compatibility for old
+// applications.
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+ var signature []byte
+ sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid})
+ hash, err := getHashForOID(sd.digestOid)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
+ h.Write(sd.data)
+ sd.messageDigest = h.Sum(nil)
+ switch pkey := pkey.(type) {
+ case *dsa.PrivateKey:
+ // dsa doesn't implement crypto.Signer so we make a special case
+ // https://github.com/golang/go/issues/27889
+ r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest)
+ if err != nil {
+ return err
+ }
+ signature, err = asn1.Marshal(dsaSignature{r, s})
+ if err != nil {
+ return err
+ }
+ default:
+ key, ok := pkey.(crypto.Signer)
+ if !ok {
+ return errors.New("pkcs7: private key does not implement crypto.Signer")
+ }
+ signature, err = key.Sign(rand.Reader, sd.messageDigest, hash)
+ if err != nil {
+ return err
+ }
+ }
+ var ias issuerAndSerial
+ ias.SerialNumber = ee.SerialNumber
+ // no parent, the issuer is the end-entity cert itself
+ ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+ if sd.encryptionOid == nil {
+ // if the encryption algorithm wasn't set by SetEncryptionAlgorithm,
+ // infer it from the digest algorithm
+ sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+ if err != nil {
+ return err
+ }
+ }
+ signer := signerInfo{
+ DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+ DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid},
+ IssuerAndSerialNumber: ias,
+ EncryptedDigest: signature,
+ Version: 1,
+ }
+ // record the signer certificate and its signer info (no signed attributes)
+ sd.certs = append(sd.certs, ee)
+ sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+ return nil
+}
+
+func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error {
+ unsignedAttrs := &attributes{}
+ for _, attr := range extraUnsignedAttrs {
+ unsignedAttrs.Add(attr.Type, attr.Value)
+ }
+ finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+ if err != nil {
+ return err
+ }
+
+ si.UnauthenticatedAttributes = finalUnsignedAttrs
+
+ return nil
+}
+
+// AddCertificate adds the certificate to the payload. Useful for parent certificates
+func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
+ sd.certs = append(sd.certs, cert)
+}
+
+// Detach removes content from the signed data struct to make it a detached signature.
+// This must be called right before Finish()
+func (sd *SignedData) Detach() {
+ sd.sd.ContentInfo = contentInfo{ContentType: OIDData}
+}
+
+// GetSignedData returns the private Signed Data
+func (sd *SignedData) GetSignedData() *signedData {
+ return &sd.sd
+}
+
+// Finish marshals the content and its signers
+func (sd *SignedData) Finish() ([]byte, error) {
+ sd.sd.Certificates = marshalCertificates(sd.certs)
+ inner, err := asn1.Marshal(sd.sd)
+ if err != nil {
+ return nil, err
+ }
+ outer := contentInfo{
+ ContentType: OIDSignedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
+ }
+ return asn1.Marshal(outer)
+}
+
+// RemoveAuthenticatedAttributes removes authenticated attributes from signedData
+// similar to OpenSSL's PKCS7_NOATTR or -noattr flags
+func (sd *SignedData) RemoveAuthenticatedAttributes() {
+ for i := range sd.sd.SignerInfos {
+ sd.sd.SignerInfos[i].AuthenticatedAttributes = nil
+ }
+}
+
+// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData
+func (sd *SignedData) RemoveUnauthenticatedAttributes() {
+ for i := range sd.sd.SignerInfos {
+ sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil
+ }
+}
+
+// verifyPartialChain checks that a given cert is issued by the first parent in the list,
+// then continues down the path. It doesn't require the last parent to be a root CA,
+// or to be trusted in any truststore. It simply verifies that the chain provided, albeit
+// partial, makes sense.
+func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error {
+ if len(parents) == 0 {
+ return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName)
+ }
+ err := cert.CheckSignatureFrom(parents[0])
+ if err != nil {
+ return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err)
+ }
+ if len(parents) == 1 {
+ // there is no more parent to check, return
+ return nil
+ }
+ return verifyPartialChain(parents[0], parents[1:])
+}
+
+func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
+ var ias issuerAndSerial
+ // The issuer RDNSequence has to match exactly the sequence in the certificate
+ // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
+ ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
+ ias.SerialNumber = cert.SerialNumber
+
+ return ias, nil
+}
+
+// signs the DER encoded form of the attributes with the private key
+func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) {
+ attrBytes, err := marshalAttributes(attrs)
+ if err != nil {
+ return nil, err
+ }
+ h := digestAlg.New()
+ h.Write(attrBytes)
+ hash := h.Sum(nil)
+
+ // dsa doesn't implement crypto.Signer so we make a special case
+ // https://github.com/golang/go/issues/27889
+ switch pkey := pkey.(type) {
+ case *dsa.PrivateKey:
+ r, s, err := dsa.Sign(rand.Reader, pkey, hash)
+ if err != nil {
+ return nil, err
+ }
+ return asn1.Marshal(dsaSignature{r, s})
+ }
+
+ key, ok := pkey.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("pkcs7: private key does not implement crypto.Signer")
+ }
+ return key.Sign(rand.Reader, hash, digestAlg)
+}
+
+type dsaSignature struct {
+ R, S *big.Int
+}
+
+// concatenates and wraps the certificates in the RawValue structure
+func marshalCertificates(certs []*x509.Certificate) rawCertificates {
+ var buf bytes.Buffer
+ for _, cert := range certs {
+ buf.Write(cert.Raw)
+ }
+ rawCerts, _ := marshalCertificateBytes(buf.Bytes())
+ return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshalling of the
+// RawContent, we have to encode them into the RawContent. If they are missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+ var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+ b, err := asn1.Marshal(val)
+ if err != nil {
+ return rawCertificates{}, err
+ }
+ return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+ rawCert, err := marshalCertificateBytes(cert)
+ if err != nil {
+ return nil, err
+ }
+ emptyContent := contentInfo{ContentType: OIDData}
+ sd := signedData{
+ Version: 1,
+ ContentInfo: emptyContent,
+ Certificates: rawCert,
+ CRLs: []pkix.CertificateList{},
+ }
+ content, err := asn1.Marshal(sd)
+ if err != nil {
+ return nil, err
+ }
+ signedContent := contentInfo{
+ ContentType: OIDSignedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+ }
+ return asn1.Marshal(signedContent)
+}
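For orientation, a minimal, hypothetical usage sketch of the signing API added in this file (not part of the vendored source). It assumes the NewSignedData constructor and the OIDDigestAlgorithmSHA256 identifier defined elsewhere in this package; the self-signed certificate and key are throwaway values for illustration only.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	"go.mozilla.org/pkcs7"
)

func main() {
	// throwaway self-signed certificate and key, for illustration only
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example signer"},
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     time.Now().Add(time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	// wrap the payload, pick a digest, and add the signer
	sd, err := pkcs7.NewSignedData([]byte("payload to sign"))
	if err != nil {
		panic(err)
	}
	sd.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256)
	if err := sd.AddSigner(cert, key, pkcs7.SignerInfoConfig{}); err != nil {
		panic(err)
	}
	sd.Detach() // optional: drop the content to produce a detached signature

	sig, err := sd.Finish()
	if err != nil {
		panic(err)
	}
	fmt.Printf("produced %d bytes of DER-encoded SignedData\n", len(sig))
}

AddSignerChain would be used instead of AddSigner when intermediate certificates need to ship inside the SignedData.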
diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/go.mozilla.org/pkcs7/verify.go
new file mode 100644
index 000000000..c8ead2362
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/verify.go
@@ -0,0 +1,264 @@
+package pkcs7
+
+import (
+ "crypto/subtle"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "time"
+)
+
+// Verify is a wrapper around VerifyWithChain() that passes a nil trust
+// store, effectively disabling certificate chain verification when
+// validating a signature.
+func (p7 *PKCS7) Verify() (err error) {
+ return p7.VerifyWithChain(nil)
+}
+
+// VerifyWithChain checks the signatures of a PKCS7 object.
+// If truststore is not nil, it also verifies the chain of trust of the end-entity
+// signer cert to one of the root in the truststore.
+func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
+ if len(p7.Signers) == 0 {
+ return errors.New("pkcs7: Message has no signers")
+ }
+ for _, signer := range p7.Signers {
+ if err := verifySignature(p7, signer, truststore); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) {
+ signedData := p7.Content
+ ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+ if ee == nil {
+ return errors.New("pkcs7: No certificate for signer")
+ }
+ signingTime := time.Now().UTC()
+ if len(signer.AuthenticatedAttributes) > 0 {
+ // TODO(fullsailor): First check the content type match
+ var digest []byte
+ err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+ if err != nil {
+ return err
+ }
+ hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
+ h.Write(p7.Content)
+ computed := h.Sum(nil)
+ if subtle.ConstantTimeCompare(digest, computed) != 1 {
+ return &MessageDigestMismatchError{
+ ExpectedDigest: digest,
+ ActualDigest: computed,
+ }
+ }
+ signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+ if err != nil {
+ return err
+ }
+ err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+ if err == nil {
+ // signing time found, performing validity check
+ if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+ return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+ signingTime.Format(time.RFC3339),
+ ee.NotBefore.Format(time.RFC3339),
+ ee.NotAfter.Format(time.RFC3339))
+ }
+ }
+ }
+ if truststore != nil {
+ _, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime)
+ if err != nil {
+ return err
+ }
+ }
+ sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+ if err != nil {
+ return err
+ }
+ return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there is not exactly one signer, nil is returned.
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+ if len(p7.Signers) != 1 {
+ return nil
+ }
+ signer := p7.Signers[0]
+ return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+ sd, ok := p7.raw.(signedData)
+ if !ok {
+ return errors.New("pkcs7: payload is not signedData content")
+ }
+ if len(sd.SignerInfos) < 1 {
+ return errors.New("pkcs7: payload has no signers")
+ }
+ attributes := sd.SignerInfos[0].AuthenticatedAttributes
+ return unmarshalAttribute(attributes, attributeType, out)
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+ var sd signedData
+ if _, err := asn1.Unmarshal(data, &sd); err != nil {
+ return nil, err
+ }
+ certs, err := sd.Certificates.Parse()
+ if err != nil {
+ return nil, err
+ }
+
+ var compound asn1.RawValue
+ var content unsignedData
+
+ // The Content.Bytes may be empty on PKI responses.
+ if len(sd.ContentInfo.Content.Bytes) > 0 {
+ if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
+ return nil, err
+ }
+ }
+ // Compound octet string
+ if compound.IsCompound {
+ if compound.Tag == 4 {
+ if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
+ return nil, err
+ }
+ } else {
+ content = compound.Bytes
+ }
+ } else {
+ // assuming this is tag 04
+ content = compound.Bytes
+ }
+ return &PKCS7{
+ Content: content,
+ Certificates: certs,
+ CRLs: sd.CRLs,
+ Signers: sd.SignerInfos,
+ raw: sd}, nil
+}
+
+// verifyCertChain takes an end-entity cert, a list of potential intermediates and a
+// truststore, and builds all potential chains between the EE and a trusted root.
+//
+// When verifying chains that may have expired, currentTime can be set to a past date
+// to allow the verification to pass. If unset, currentTime is set to the current UTC time.
+func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) {
+ intermediates := x509.NewCertPool()
+ for _, intermediate := range certs {
+ intermediates.AddCert(intermediate)
+ }
+ verifyOptions := x509.VerifyOptions{
+ Roots: truststore,
+ Intermediates: intermediates,
+ KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ CurrentTime: currentTime,
+ }
+ chains, err = ee.Verify(verifyOptions)
+ if err != nil {
+ return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err)
+ }
+ return
+}
+
+// MessageDigestMismatchError is returned when the signer data digest does not
+// match the computed digest for the contained content
+type MessageDigestMismatchError struct {
+ ExpectedDigest []byte
+ ActualDigest []byte
+}
+
+func (err *MessageDigestMismatchError) Error() string {
+ return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
+}
+
+func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) {
+ switch {
+ case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1):
+ return x509.ECDSAWithSHA1, nil
+ case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256):
+ return x509.ECDSAWithSHA256, nil
+ case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384):
+ return x509.ECDSAWithSHA384, nil
+ case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512):
+ return x509.ECDSAWithSHA512, nil
+ case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512):
+ switch {
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
+ return x509.SHA1WithRSA, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
+ return x509.SHA256WithRSA, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384):
+ return x509.SHA384WithRSA, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512):
+ return x509.SHA512WithRSA, nil
+ default:
+ return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
+ digest.Algorithm.String(), digestEncryption.Algorithm.String())
+ }
+ case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA),
+ digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1):
+ switch {
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
+ return x509.DSAWithSHA1, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
+ return x509.DSAWithSHA256, nil
+ default:
+ return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
+ digest.Algorithm.String(), digestEncryption.Algorithm.String())
+ }
+ case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384),
+ digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521):
+ switch {
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
+ return x509.ECDSAWithSHA1, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
+ return x509.ECDSAWithSHA256, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384):
+ return x509.ECDSAWithSHA384, nil
+ case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512):
+ return x509.ECDSAWithSHA512, nil
+ default:
+ return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
+ digest.Algorithm.String(), digestEncryption.Algorithm.String())
+ }
+ default:
+ return -1, fmt.Errorf("pkcs7: unsupported algorithm %q",
+ digestEncryption.Algorithm.String())
+ }
+}
+
+func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
+ for _, cert := range certs {
+ if isCertMatchForIssuerAndSerial(cert, ias) {
+ return cert
+ }
+ }
+ return nil
+}
+
+func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error {
+ for _, attr := range attrs {
+ if attr.Type.Equal(attributeType) {
+ _, err := asn1.Unmarshal(attr.Value.Bytes, out)
+ return err
+ }
+ }
+ return errors.New("pkcs7: attribute type not in attributes")
+}
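Correspondingly, a hypothetical verification sketch (again not part of the vendored source). It assumes the package-level Parse function defined elsewhere in this package, plus caller-supplied files signature.p7s and ca.pem that stand in for a DER-encoded SignedData blob and an optional PEM CA bundle.

package main

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"

	"go.mozilla.org/pkcs7"
)

func main() {
	// signature.p7s is a hypothetical DER-encoded SignedData produced elsewhere
	der, err := ioutil.ReadFile("signature.p7s")
	if err != nil {
		panic(err)
	}
	p7, err := pkcs7.Parse(der)
	if err != nil {
		panic(err)
	}

	// For detached signatures, assign the original payload to p7.Content
	// before verifying, since the SignedData itself carries no content.

	// Signature-only check: the signer chain is not validated against any roots.
	if err := p7.Verify(); err != nil {
		panic(err)
	}

	// Optionally validate the chain as well, against a caller-built trust store.
	if pem, err := ioutil.ReadFile("ca.pem"); err == nil { // hypothetical CA bundle
		roots := x509.NewCertPool()
		roots.AppendCertsFromPEM(pem)
		if err := p7.VerifyWithChain(roots); err != nil {
			panic(err)
		}
	}

	if signer := p7.GetOnlySigner(); signer != nil {
		fmt.Println("signed by:", signer.Subject.CommonName)
	}
}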
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1c1865e23..3f490616a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -66,7 +66,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.15.1-0.20200708111410-d2ea9429455d
+# github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -90,6 +90,7 @@ github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
github.com/containers/common/pkg/cgroupv2
github.com/containers/common/pkg/config
+github.com/containers/common/pkg/retry
github.com/containers/common/pkg/sysinfo
github.com/containers/common/version
# github.com/containers/conmon v2.0.19+incompatible
@@ -137,7 +138,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.0.2
+# github.com/containers/ocicrypt v1.0.3
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
github.com/containers/ocicrypt/config
@@ -279,8 +280,6 @@ github.com/docker/spdystream/spdy
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.6.5
github.com/fsouza/go-dockerclient
-# github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
-github.com/fullsailor/pkcs7
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/godbus/dbus/v5 v5.0.3
@@ -554,6 +553,8 @@ github.com/xeipuuv/gojsonreference
github.com/xeipuuv/gojsonschema
# go.etcd.io/bbolt v1.3.5
go.etcd.io/bbolt
+# go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
+go.mozilla.org/pkcs7
# go.opencensus.io v0.22.0
go.opencensus.io
go.opencensus.io/internal