author     Daniel J Walsh <dwalsh@redhat.com>  2019-12-19 13:29:25 -0500
committer  Daniel J Walsh <dwalsh@redhat.com>  2019-12-20 09:30:47 -0500
commit     50ece79387dcf6c748e3ae1bd6a7067059c0dfe3 (patch)
tree       6b30c4f66f7be315ff2257447be3818be98fb50f
parent     a359ca0d1825859dd8b7c1384f11d703ec6625b4 (diff)
build(deps): bump github.com/containers/image/v5 from 5.0.0 to 5.1.0
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.0.0 to 5.1.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.0.0...v5.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
-rw-r--r--  go.mod | 5
-rw-r--r--  go.sum | 21
-rw-r--r--  test/e2e/create_test.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 246
-rw-r--r--  vendor/github.com/containers/image/v5/copy/encrypt.go | 24
-rw-r--r--  vendor/github.com/containers/image/v5/copy/manifest.go | 8
-rw-r--r--  vendor/github.com/containers/image/v5/copy/progress_reader.go | 69
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/src.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/docker/archive/transport.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_client.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/docker/tarfile/dest.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/docker/tarfile/src.go | 22
-rw-r--r--  vendor/github.com/containers/image/v5/docker/wwwauthenticate.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/image/docker_schema1.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/image/docker_schema2.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/image/manifest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/image/oci.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/list.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/manifest.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go | 67
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_dest.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_src.go | 21
-rw-r--r--  vendor/github.com/containers/image/v5/oci/archive/oci_transport.go | 11
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_transport.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift-copies.go | 80
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/ostree/ostree_dest.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go | 7
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_eval.go | 1
-rw-r--r--  vendor/github.com/containers/image/v5/signature/signature.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_image.go | 9
-rw-r--r--  vendor/github.com/containers/image/v5/storage/storage_reference.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go | 59
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/ocicrypt/MAINTAINERS | 5
-rw-r--r--  vendor/github.com/containers/ocicrypt/Makefile | 31
-rw-r--r--  vendor/github.com/containers/ocicrypt/README.md | 32
-rw-r--r--  vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go | 160
-rw-r--r--  vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go | 193
-rw-r--r--  vendor/github.com/containers/ocicrypt/config/config.go | 114
-rw-r--r--  vendor/github.com/containers/ocicrypt/config/constructors.go | 134
-rw-r--r--  vendor/github.com/containers/ocicrypt/encryption.go | 325
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.mod | 18
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.sum | 73
-rw-r--r--  vendor/github.com/containers/ocicrypt/gpg.go | 425
-rw-r--r--  vendor/github.com/containers/ocicrypt/gpgvault.go | 100
-rw-r--r--  vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go | 132
-rw-r--r--  vendor/github.com/containers/ocicrypt/keywrap/keywrap.go | 40
-rw-r--r--  vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go | 269
-rw-r--r--  vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go | 132
-rw-r--r--  vendor/github.com/containers/ocicrypt/reader.go | 40
-rw-r--r--  vendor/github.com/containers/ocicrypt/spec/spec.go | 12
-rw-r--r--  vendor/github.com/containers/ocicrypt/utils/delayedreader.go | 109
-rw-r--r--  vendor/github.com/containers/ocicrypt/utils/ioutils.go | 31
-rw-r--r--  vendor/github.com/containers/ocicrypt/utils/testing.go | 166
-rw-r--r--  vendor/github.com/containers/ocicrypt/utils/utils.go | 220
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/.gitignore | 24
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/.travis.yml | 7
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/LICENSE | 22
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/README.md | 8
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/ber.go | 248
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/pkcs7.go | 962
-rw-r--r--  vendor/github.com/fullsailor/pkcs7/x509.go | 133
-rw-r--r--  vendor/github.com/imdario/mergo/.travis.yml | 2
-rw-r--r--  vendor/github.com/imdario/mergo/merge.go | 50
-rw-r--r--  vendor/github.com/mattn/go-isatty/.travis.yml | 13
-rw-r--r--  vendor/github.com/mattn/go-isatty/LICENSE | 9
-rw-r--r--  vendor/github.com/mattn/go-isatty/README.md | 50
-rw-r--r--  vendor/github.com/mattn/go-isatty/doc.go | 2
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.mod | 3
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.sum | 2
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_android.go | 23
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_bsd.go | 24
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_others.go | 15
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_solaris.go | 22
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_tcgets.go | 19
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_windows.go | 94
-rw-r--r--  vendor/github.com/vbauerster/mpb/.travis.yml | 14
-rw-r--r--  vendor/github.com/vbauerster/mpb/LICENSE | 29
-rw-r--r--  vendor/github.com/vbauerster/mpb/bar.go | 399
-rw-r--r--  vendor/github.com/vbauerster/mpb/bar_filler.go | 111
-rw-r--r--  vendor/github.com/vbauerster/mpb/bar_option.go | 193
-rw-r--r--  vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go | 13
-rw-r--r--  vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go | 77
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/counters.go | 208
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/decorator.go | 152
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/elapsed.go | 68
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/eta.go | 206
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/name.go | 45
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/percentage.go | 39
-rw-r--r--  vendor/github.com/vbauerster/mpb/decor/speed.go | 271
-rw-r--r--  vendor/github.com/vbauerster/mpb/doc.go | 6
-rw-r--r--  vendor/github.com/vbauerster/mpb/go.test.sh | 12
-rw-r--r--  vendor/github.com/vbauerster/mpb/internal/percentage.go | 12
-rw-r--r--  vendor/github.com/vbauerster/mpb/options.go | 90
-rw-r--r--  vendor/github.com/vbauerster/mpb/progress.go | 267
-rw-r--r--  vendor/github.com/vbauerster/mpb/progress_posix.go | 70
-rw-r--r--  vendor/github.com/vbauerster/mpb/progress_windows.go | 43
-rw-r--r--  vendor/github.com/vbauerster/mpb/proxyreader.go | 22
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/.gitignore (renamed from vendor/github.com/vbauerster/mpb/.gitignore) | 0
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/.travis.yml | 12
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/README.md (renamed from vendor/github.com/vbauerster/mpb/README.md) | 63
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/UNLICENSE | 24
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/bar.go | 477
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/bar_filler.go | 137
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/bar_option.go | 209
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/cwriter/writer.go (renamed from vendor/github.com/vbauerster/mpb/cwriter/writer.go) | 45
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/cwriter/writer_posix.go | 9
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/cwriter/writer_windows.go | 60
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/counters.go | 84
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/decorator.go | 173
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/doc.go (renamed from vendor/github.com/vbauerster/mpb/decor/doc.go) | 6
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go | 48
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/eta.go | 212
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/merge.go | 97
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go (renamed from vendor/github.com/vbauerster/mpb/decor/moving-average.go) | 29
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/name.go | 27
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go | 36
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/percentage.go | 72
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/size_type.go | 109
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/sizeb1000_string.go | 41
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/sizeb1024_string.go | 41
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/speed.go | 175
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/decor/spinner.go | 35
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/doc.go | 2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/go.mod | 9
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/go.sum | 11
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/internal/percentage.go | 15
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/options.go | 105
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/priority_queue.go (renamed from vendor/github.com/vbauerster/mpb/priority_queue.go) | 22
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/progress.go | 394
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/proxyreader.go | 45
-rw-r--r--  vendor/github.com/vbauerster/mpb/v4/spinner_filler.go (renamed from vendor/github.com/vbauerster/mpb/spinner_filler.go) | 17
-rw-r--r--  vendor/golang.org/x/crypto/ed25519/ed25519.go | 222
-rw-r--r--  vendor/golang.org/x/crypto/ed25519/ed25519_go113.go | 73
-rw-r--r--  vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go | 1422
-rw-r--r--  vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go | 1793
-rw-r--r--  vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go | 4
-rw-r--r--  vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go | 6
-rw-r--r--  vendor/golang.org/x/crypto/openpgp/packet/private_key.go | 2
-rw-r--r--  vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc | 1
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.gitignore | 7
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.travis.yml | 46
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md | 10
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md | 14
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/LICENSE | 202
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/README.md | 118
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/asymmetric.go | 592
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go | 196
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go | 75
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go | 62
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go | 109
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/crypter.go | 535
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/doc.go | 27
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/encoding.go | 179
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/LICENSE | 27
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/README.md | 13
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/decode.go | 1183
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/encode.go | 1197
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/indent.go | 141
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/scanner.go | 623
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/stream.go | 480
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/tags.go | 44
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jwe.go | 294
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jwk.go | 608
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jws.go | 321
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/opaque.go | 83
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/shared.go | 499
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/signing.go | 389
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/symmetric.go | 482
-rw-r--r--  vendor/modules.txt | 37
181 files changed, 19731 insertions, 2922 deletions
diff --git a/go.mod b/go.mod
index d1e058ebb..00591da6f 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/containernetworking/plugins v0.8.2
github.com/containers/buildah v1.12.0
github.com/containers/conmon v2.0.2+incompatible // indirect
- github.com/containers/image/v5 v5.0.0
+ github.com/containers/image/v5 v5.1.0
github.com/containers/psgo v1.4.0
github.com/containers/storage v1.15.3
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
@@ -36,7 +36,6 @@ require (
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf
github.com/hashicorp/go-multierror v1.0.0
github.com/hpcloud/tail v1.0.0
- github.com/imdario/mergo v0.3.7 // indirect
github.com/json-iterator/go v1.1.8
github.com/mattn/go-isatty v0.0.8 // indirect
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
@@ -64,7 +63,7 @@ require (
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.0.0
go.uber.org/atomic v1.4.0 // indirect
- golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
+ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
diff --git a/go.sum b/go.sum
index ab6364a11..e63126bbb 100644
--- a/go.sum
+++ b/go.sum
@@ -53,6 +53,7 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -88,8 +89,12 @@ github.com/containers/conmon v2.0.2+incompatible h1:h2HCdd/EBpwFn7RT82Y2GyXnVUHW
github.com/containers/conmon v2.0.2+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
+github.com/containers/image/v5 v5.1.0 h1:5FjAvPJniamuNNIQHkh4PnsL+n+xzs6Aonzaz5dqTEo=
+github.com/containers/image/v5 v5.1.0/go.mod h1:BKlMD34WxRo1ruGHHEOrPQP0Qci7SWoPwU6fS7arsCU=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741 h1:8tQkOcednLJtUcZgK7sPglscXtxvMOnFOa6wd09VWLM=
+github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/psgo v1.3.2 h1:jYfppPih3S/j2Yi5O14AXjd8GfCx1ph9L3YsoK3adko=
github.com/containers/psgo v1.3.2/go.mod h1:ENXXLQ5E1At4K0EUsGogXBJi/C28gwqkONWeLPI9fJ8=
github.com/containers/psgo v1.4.0 h1:D8B4fZCCZhYgc8hDyMPCiShOinmOB1TP1qe46sSC19k=
@@ -120,6 +125,7 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca h1:CJstDqYy9ClWuPcDHMTCAiUS+ckekluYetGR2iYYWuo=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca/go.mod h1:BO0al9TKber3XUTucLzKgoG5sq8qiOB41H7zSdfw6r8=
github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b h1:SgS+WV10y2Bubuy2HquSBori6DXj9sqRN77Hgs5H7Qc=
@@ -184,6 +190,8 @@ github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVf
github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds=
github.com/fsouza/go-dockerclient v1.6.0 h1:f7j+AX94143JL1H3TiqSMkM4EcLDI0De1qD4GGn3Hig=
github.com/fsouza/go-dockerclient v1.6.0/go.mod h1:YWwtNPuL4XTX1SKJQk86cWPmmqwx+4np9qfPbb+znGc=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -262,6 +270,8 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VReWFjuxqrMIq12WKlSULI6Gs3PxQghLA=
@@ -427,6 +437,7 @@ github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQl
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/seccomp/containers-golang v0.0.0-20180629143253-cdfdaa7543f4 h1:rOG9oHVIndNR14f3HRyBy9UPQYmIPniWqTU1TDdHhq4=
@@ -435,6 +446,7 @@ github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f h1:OtU/w
github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -481,12 +493,15 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b h1:hdDRrn9OP/roL8a/e/5Zu85ldrcdndu9IeBj2OEvQm0=
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b/go.mod h1:YHaw8N660ESgMgLOZfLQqT1htFItynAUxMesFBho52s=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw=
github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU=
+github.com/vbauerster/mpb/v4 v4.11.1 h1:ZOYQSVHgmeanXsbyC44aDg76tBGCS/54Rk8VkL8dJGA=
+github.com/vbauerster/mpb/v4 v4.11.1/go.mod h1:vMLa1J/ZKC83G2lB/52XpqT+ZZtFG4aZOdKhmpRL1uM=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
@@ -517,8 +532,11 @@ golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
+golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -575,6 +593,7 @@ golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzC
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -625,6 +644,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 7d977f4e3..134b7b162 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -285,7 +285,7 @@ var _ = Describe("Podman create", func() {
})
It("podman create using cross-arch image list instance by digest", func() {
- session := podmanTest.PodmanNoCache([]string{"create", "--pull=always", "--override-arch=ppc64le", "--name=foo", ALPINEARM64DIGEST})
+ session := podmanTest.PodmanNoCache([]string{"create", "--pull=always", "--override-arch=arm64", "--name=foo", ALPINEARM64DIGEST})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To((Equal(0)))
session = podmanTest.PodmanNoCache([]string{"inspect", "--format", "{{.Image}}", "foo"})
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 090d862d5..29660b6b2 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -21,12 +21,14 @@ import (
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ "github.com/containers/ocicrypt"
+ encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/vbauerster/mpb"
- "github.com/vbauerster/mpb/decor"
+ "github.com/vbauerster/mpb/v4"
+ "github.com/vbauerster/mpb/v4/decor"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sync/semaphore"
)
@@ -39,9 +41,14 @@ type digestingReader struct {
validationSucceeded bool
}
-// maxParallelDownloads is used to limit the maxmimum number of parallel
-// downloads. Let's follow Firefox by limiting it to 6.
-var maxParallelDownloads = 6
+var (
+ // ErrDecryptParamsMissing is returned if there is missing decryption parameters
+ ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
+
+ // maxParallelDownloads is used to limit the maxmimum number of parallel
+ // downloads. Let's follow Firefox by limiting it to 6.
+ maxParallelDownloads = 6
+)
// compressionBufferSize is the buffer size used to compress a blob
var compressionBufferSize = 1048576
@@ -50,6 +57,7 @@ var compressionBufferSize = 1048576
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+ var digester digest.Digester
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
}
@@ -57,9 +65,11 @@ func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digest
if !digestAlgorithm.Available() {
return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
}
+ digester = digestAlgorithm.Digester()
+
return &digestingReader{
source: source,
- digester: digestAlgorithm.Digester(),
+ digester: digester,
expectedDigest: expectedDigest,
validationFailed: false,
}, nil
@@ -99,6 +109,8 @@ type copier struct {
copyInParallel bool
compressionFormat compression.Algorithm
compressionLevel *int
+ ociDecryptConfig *encconfig.DecryptConfig
+ ociEncryptConfig *encconfig.EncryptConfig
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -109,6 +121,9 @@ type imageCopier struct {
diffIDsAreNeeded bool
canModifyManifest bool
canSubstituteBlobs bool
+ ociDecryptConfig *encconfig.DecryptConfig
+ ociEncryptConfig *encconfig.EncryptConfig
+ ociEncryptLayers *[]int
}
const (
@@ -155,6 +170,20 @@ type Options struct {
ForceManifestMIMEType string
ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
+ // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
+ // The encryption options is derived from the construction of EncryptConfig object.
+ // Note: During initial encryption process of a layer, the resultant digest is not known
+ // during creation, so newDigestingReader has to be set with validateDigest = false
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, denotes encrypt all layers.
+ // integers in the slice represent 0-indexed layer indices, with support for negative
+ // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+ // OciDecryptConfig contains the config that can be used to decrypt an image if it is
+ // encrypted if non-nil. If nil, it does not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
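[Editor's note, not part of this commit: a minimal sketch of how a caller might use the new encryption fields on copy.Options. It assumes an *encconfig.EncryptConfig built elsewhere (e.g. from ocicrypt key material) and the copy.Image entry point of containers/image v5; the helper name and parameters are hypothetical.]

package encryptexample

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
	encconfig "github.com/containers/ocicrypt/config"
)

// copyEncryptingTopLayer is a hypothetical helper: it copies src to dest,
// asking for only the top-most layer to be encrypted via the new
// OciEncryptConfig/OciEncryptLayers options introduced in this bump.
func copyEncryptingTopLayer(ctx context.Context, policy *signature.PolicyContext,
	dest, src types.ImageReference, ec *encconfig.EncryptConfig) error {
	encryptLayers := []int{-1} // -1 = last (top-most) layer; an empty slice would mean "encrypt all layers"
	_, err := copy.Image(ctx, policy, dest, src, &copy.Options{
		OciEncryptConfig: ec,
		OciEncryptLayers: &encryptLayers,
	})
	return err
}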
@@ -493,6 +522,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
}
+ // TODO: Remove src.SupportsEncryption call and interface once copyUpdatedConfigAndManifest does not depend on source Image manifest type
+ // Currently, the way copyUpdatedConfigAndManifest updates the manifest is to apply updates to the source manifest and call PutManifest
+ // of the modified source manifest. The implication is that schemas like docker2 cannot be encrypted even though the destination
+ // supports encryption because docker2 struct does not have annotations, which are required.
+ // Reference to issue: https://github.com/containers/image/issues/746
+ if options.OciEncryptLayers != nil && !src.SupportsEncryption(ctx) {
+ return nil, "", "", errors.Errorf("Encryption request but not supported by source transport %s", src.Reference().Transport().Name())
+ }
+
// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
// one item from a manifest list that matches it, accept that as a match.
@@ -524,7 +562,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
}
- if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
+ if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, "", "", err
}
@@ -552,6 +590,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
src: src,
// diffIDsAreNeeded is computed later
canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
+ ociDecryptConfig: options.OciDecryptConfig,
+ ociEncryptConfig: options.OciEncryptConfig,
+ ociEncryptLayers: options.OciEncryptLayers,
}
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
@@ -565,15 +606,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
return nil, "", "", err
}
+ destRequiresOciEncryption := (isEncrypted(src) && ic.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
+
// We compute preferredManifestMIMEType only to show it in error messages.
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
+ preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
if err != nil {
return nil, "", "", err
}
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
+ // If encrypted and decryption keys provided, we should try to decrypt
+ ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.ociDecryptConfig != nil) || ic.ociEncryptConfig != nil
if err := ic.copyLayers(ctx); err != nil {
return nil, "", "", err
@@ -651,21 +696,28 @@ func (c *copier) Printf(format string, a ...interface{}) {
fmt.Fprintf(c.reportWriter, format, a...)
}
-func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
+func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
if dest.MustMatchRuntimeOS() {
+ c, err := src.OCIConfig(ctx)
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing image configuration")
+ }
+
wantedOS := runtime.GOOS
if sys != nil && sys.OSChoice != "" {
wantedOS = sys.OSChoice
}
- c, err := src.OCIConfig(ctx)
- if err != nil {
- return errors.Wrapf(err, "Error parsing image configuration")
+ if wantedOS != c.OS {
+ return fmt.Errorf("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS)
}
- osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS)
- if wantedOS == "windows" && c.OS == "linux" {
- return osErr
- } else if wantedOS != "windows" && c.OS == "windows" {
- return osErr
+
+ wantedArch := runtime.GOARCH
+ if sys != nil && sys.ArchitectureChoice != "" {
+ wantedArch = sys.ArchitectureChoice
+ }
+ if wantedArch != c.Architecture {
+ return fmt.Errorf("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch)
}
}
return nil
@@ -709,6 +761,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
return err
}
srcInfosUpdated := false
+ // If we only need to check authorization, no updates required.
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if !ic.canModifyManifest {
return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
@@ -737,7 +790,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}
data := make([]copyLayerData, numLayers)
- copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress) {
defer copySemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
@@ -752,18 +805,39 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
- cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
+ cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool)
}
data[index] = cld
}
+ // Create layer Encryption map
+ encLayerBitmap := map[int]bool{}
+ var encryptAll bool
+ if ic.ociEncryptLayers != nil {
+ encryptAll = len(*ic.ociEncryptLayers) == 0
+ totalLayers := len(srcInfos)
+ for _, l := range *ic.ociEncryptLayers {
+ // if layer is negative, it is reverse indexed.
+ encLayerBitmap[(totalLayers+l)%totalLayers] = true
+ }
+
+ if encryptAll {
+ for i := 0; i < len(srcInfos); i++ {
+ encLayerBitmap[i] = true
+ }
+ }
+ }
+
func() { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
defer progressCleanup()
for i, srcLayer := range srcInfos {
- copySemaphore.Acquire(ctx, 1)
- go copyLayerHelper(i, srcLayer, progressPool)
+ err = copySemaphore.Acquire(ctx, 1)
+ if err != nil {
+ logrus.Debug("Can't acquire semaphoer", err)
+ }
+ go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
}
// Wait for all layers to be copied
@@ -854,7 +928,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
ctx, cancel := context.WithCancel(ctx)
- pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+ pool := mpb.NewWithContext(ctx, mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
return pool, func() {
cancel()
pool.Wait()
@@ -874,6 +948,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
prefix = prefix[:maxPrefixLen]
}
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
// Use a normal progress bar when we know the size (i.e., size > 0).
// Otherwise, use a spinner to indicate that something's happening.
var bar *mpb.Bar
@@ -881,10 +958,10 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
bar = pool.AddBar(info.Size,
mpb.BarClearOnComplete(),
mpb.PrependDecorators(
- decor.Name(prefix),
+ decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
- decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
),
)
} else {
@@ -893,10 +970,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
mpb.BarClearOnComplete(),
mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
mpb.PrependDecorators(
- decor.Name(prefix),
- ),
- mpb.AppendDecorators(
- decor.OnComplete(decor.Name(""), " "+onComplete),
+ decor.OnComplete(decor.Name(prefix), onComplete),
),
)
}
@@ -919,7 +993,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
progressPool, progressCleanup := c.newProgressPool(ctx)
defer progressCleanup()
bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
- destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+ destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar)
if err != nil {
return types.BlobInfo{}, err
}
@@ -945,9 +1019,10 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
- diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+ // Diffs are needed if we are encrypting an image or trying to decrypt an image
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.ociDecryptConfig != nil)
// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
if !diffIDIsNeeded {
@@ -972,7 +1047,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
- blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar)
+ blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar)
if err != nil {
return types.BlobInfo{}, "", err
}
@@ -1003,7 +1078,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
+ diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@@ -1012,7 +1087,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
pipeReader, pipeWriter := io.Pipe()
defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
- pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+ _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
@@ -1027,7 +1102,10 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
return pipeWriter
}
}
- blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
+ ic.c.ociDecryptConfig = ic.ociDecryptConfig
+ ic.c.ociEncryptConfig = ic.ociEncryptConfig
+
+ blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -1064,7 +1142,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
+ canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@@ -1078,7 +1156,29 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
}
+
var destStream io.Reader = digestingReader
+ var decrypted bool
+ if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
+ newDesc := imgspecv1.Descriptor{
+ Annotations: srcInfo.Annotations,
+ }
+
+ var d digest.Digest
+ destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
+ }
+
+ srcInfo.Digest = d
+ srcInfo.Size = -1
+ for k := range srcInfo.Annotations {
+ if strings.HasPrefix(k, "org.opencontainers.image.enc") {
+ delete(srcInfo.Annotations, k)
+ }
+ }
+ decrypted = true
+ }
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
@@ -1101,7 +1201,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
- if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
+ if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
+ // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
+ logrus.Debugf("Using original blob without modification for encrypted blob")
+ compressionOperation = types.PreserveOriginal
+ inputInfo = srcInfo
+ } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
@@ -1152,15 +1257,51 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo = srcInfo
}
+ // Perform image encryption for valid mediatypes if ociEncryptConfig provided
+ var (
+ encrypted bool
+ finalizer ocicrypt.EncryptLayerFinalizer
+ )
+ if toEncrypt {
+ if decrypted {
+ return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
+ }
+
+ if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
+ var annotations map[string]string
+ if !decrypted {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+
+ s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "Error encrypting blob %s", srcInfo.Digest)
+ }
+
+ destStream = s
+ finalizer = fin
+ inputInfo.Digest = ""
+ inputInfo.Size = -1
+ encrypted = true
+ }
+ }
+
// === Report progress using the c.progress channel, if required.
if c.progress != nil && c.progressInterval > 0 {
- destStream = &progressReader{
- source: destStream,
- channel: c.progress,
- interval: c.progressInterval,
- artifact: srcInfo,
- lastTime: time.Now(),
- }
+ progressReader := newProgressReader(
+ destStream,
+ c.progress,
+ c.progressInterval,
+ srcInfo,
+ )
+ defer progressReader.reportDone()
+ destStream = progressReader
}
// === Finally, send the layer stream to dest.
@@ -1176,6 +1317,21 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if canModifyBlob && !isConfig {
uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
}
+ if decrypted {
+ uploadedInfo.CryptoOperation = types.Decrypt
+ } else if encrypted {
+ encryptAnnotations, err := finalizer()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
+ }
+ uploadedInfo.CryptoOperation = types.Encrypt
+ if uploadedInfo.Annotations == nil {
+ uploadedInfo.Annotations = map[string]string{}
+ }
+ for k, v := range encryptAnnotations {
+ uploadedInfo.Annotations[k] = v
+ }
+ }
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
@@ -1218,7 +1374,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
- dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+ _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go
new file mode 100644
index 000000000..a18d6f151
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encrypt.go
@@ -0,0 +1,24 @@
+package copy
+
+import (
+ "strings"
+
+ "github.com/containers/image/v5/types"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+ return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+ layers := i.LayerInfos()
+ for _, l := range layers {
+ if isOciEncrypted(l.MediaType) {
+ return true
+ }
+ }
+ return false
+}
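[Editor's note, illustrative only: the "+encrypted" media-type suffix convention that isOciEncrypted checks for, shown against an assumed encrypted OCI layer media type; the concrete media types come from ocicrypt/the OCI encryption proposal, not from this file.]

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed example media type for an encrypted, gzip-compressed OCI layer.
	mediaType := "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
	fmt.Println(strings.HasSuffix(mediaType, "+encrypted")) // true -> treated as an encrypted layer
}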
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index f5f6c9c5f..bcf082df3 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -42,7 +42,7 @@ func (os *orderedSet) append(s string) {
// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
-func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) {
+func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
_, srcType, err := ic.src.Manifest(ctx)
if err != nil { // This should have been cached?!
return "", nil, errors.Wrap(err, "Error reading manifest")
@@ -57,12 +57,14 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
}
- if len(destSupportedManifestMIMETypes) == 0 {
+ if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
}
supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes {
- supportedByDest[t] = struct{}{}
+ if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
+ supportedByDest[t] = struct{}{}
+ }
}
// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
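[Editor's note, a sketch that is not part of the diff: how the encryption-aware filtering above can be probed for two common manifest types, assuming manifest.MIMETypeSupportsEncryption from containers/image v5.1.0 and the usual MIME-type constants.]

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// OCI manifests carry per-layer annotations, so they can hold encryption metadata.
	fmt.Println(manifest.MIMETypeSupportsEncryption(imgspecv1.MediaTypeImageManifest))
	// Docker schema2 has no layer annotations, so encryption-requiring copies must convert away from it.
	fmt.Println(manifest.MIMETypeSupportsEncryption(manifest.DockerV2Schema2MediaType))
}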
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go
index 1d0c41bce..0761065a2 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go
@@ -9,20 +9,71 @@ import (
// progressReader is a reader that reports its progress on an interval.
type progressReader struct {
- source io.Reader
- channel chan types.ProgressProperties
- interval time.Duration
- artifact types.BlobInfo
- lastTime time.Time
- offset uint64
+ source io.Reader
+ channel chan<- types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastUpdate time.Time
+ offset uint64
+ offsetUpdate uint64
}
+// newProgressReader creates a new progress reader for:
+// `source`: The source when internally reading bytes
+// `channel`: The reporter channel to which the progress will be sent
+// `interval`: The update interval to indicate how often the progress should update
+// `artifact`: The blob metadata which is currently being progressed
+func newProgressReader(
+ source io.Reader,
+ channel chan<- types.ProgressProperties,
+ interval time.Duration,
+ artifact types.BlobInfo,
+) *progressReader {
+ // The progress reader constructor informs the progress channel
+ // that a new artifact will be read
+ channel <- types.ProgressProperties{
+ Event: types.ProgressEventNewArtifact,
+ Artifact: artifact,
+ }
+ return &progressReader{
+ source: source,
+ channel: channel,
+ interval: interval,
+ artifact: artifact,
+ lastUpdate: time.Now(),
+ offset: 0,
+ offsetUpdate: 0,
+ }
+}
+
+// reportDone indicates to the internal channel that the progress has been
+// finished
+func (r *progressReader) reportDone() {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventDone,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+}
+
+// Read continuously reads bytes into the progress reader and reports the
+// status via the internal channel
func (r *progressReader) Read(p []byte) (int, error) {
n, err := r.source.Read(p)
r.offset += uint64(n)
- if time.Since(r.lastTime) > r.interval {
- r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
- r.lastTime = time.Now()
+ r.offsetUpdate += uint64(n)
+
+ // Fire the progress reader in the provided interval
+ if time.Since(r.lastUpdate) > r.interval {
+ r.channel <- types.ProgressProperties{
+ Event: types.ProgressEventRead,
+ Artifact: r.artifact,
+ Offset: r.offset,
+ OffsetUpdate: r.offsetUpdate,
+ }
+ r.lastUpdate = time.Now()
+ r.offsetUpdate = 0
}
return n, err
}
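[Editor's note, a hedged sketch that is not part of the diff: a consumer for the extended progress reporting above. It assumes copy.Options.Progress is wired to this channel and ProgressInterval is non-zero; the field and event names are taken from the diff, everything else is illustrative.]

package progressexample

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// watchProgress drains a progress channel and reports the three event kinds
// introduced by this version of progressReader.
func watchProgress(progress <-chan types.ProgressProperties) {
	for p := range progress {
		switch p.Event {
		case types.ProgressEventNewArtifact:
			fmt.Printf("starting %s\n", p.Artifact.Digest)
		case types.ProgressEventRead:
			fmt.Printf("%s: %d bytes so far (+%d since last update)\n",
				p.Artifact.Digest, p.Offset, p.OffsetUpdate)
		case types.ProgressEventDone:
			fmt.Printf("finished %s at %d bytes\n", p.Artifact.Digest, p.Offset)
		}
	}
}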
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 2d6650de7..caa7a207f 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -112,7 +112,7 @@ func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *dirImageDestination) MustMatchRuntimeOS() bool {
return false
}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index 5845f63be..1cf197429 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -36,7 +36,7 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
- tarDest := tarfile.NewDestination(fh, ref.destinationRef)
+ tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
index a90707437..6a628508d 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/src.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -2,6 +2,7 @@ package archive
import (
"context"
+
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
@@ -14,11 +15,11 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) {
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
if ref.destinationRef != nil {
logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
}
- src, err := tarfile.NewSourceFromFile(ref.path)
+ src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 44213bb8d..46c01891f 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -134,7 +134,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
- src, err := newImageSource(ctx, ref)
+ src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
@@ -144,7 +144,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(ctx, ref)
+ return newImageSource(ctx, sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index 25ce55a17..c6afd4bde 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -54,7 +54,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
- Destination: tarfile.NewDestination(writer, namedTaggedRef),
+ Destination: tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
@@ -73,7 +73,9 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
if err == nil {
reader.Close()
} else {
- reader.CloseWithError(err)
+ if err := reader.CloseWithError(err); err != nil {
+ logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err)
+ }
}
}()
@@ -90,7 +92,7 @@ func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompressio
return types.PreserveOriginal
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
return d.mustMatchRuntimeOS
}
@@ -109,7 +111,9 @@ func (d *daemonImageDestination) Close() error {
// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked"" without sending
// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
// Whether that works or not, closing the PipeWriter seems desirable in any case.
- d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+ if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+ return err
+ }
}
d.goroutineCancel()
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
index 46fbcc4e0..1827f811d 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -13,11 +13,6 @@ type daemonImageSource struct {
*tarfile.Source // Implements most of types.ImageSource
}
-type layerInfo struct {
- path string
- size int64
-}
-
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
//
@@ -40,7 +35,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
}
defer inputStream.Close()
- src, err := tarfile.NewSourceFromStream(inputStream)
+ src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 0b012c703..986bcb986 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -440,7 +440,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// If the delta between the date and now is positive, use it.
// Otherwise, fall back to using the default exponential back off.
if t, err := http.ParseTime(after); err == nil {
- delta := int64(t.Sub(time.Now()).Seconds())
+ delta := int64(time.Until(t).Seconds())
if delta > 0 {
return min(delta, maxDelay)
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 417d97aec..47a73d868 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -94,7 +94,7 @@ func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
index b02c60bb3..c322156b5 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go
@@ -29,10 +29,17 @@ type Destination struct {
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
config []byte
+ sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
+// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
+ return NewDestinationWithContext(nil, dest, ref)
+}
+
+// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
+func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
@@ -42,6 +49,7 @@ func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
tar: tar.NewWriter(dest),
repoTags: repoTags,
blobs: make(map[digest.Digest]types.BlobInfo),
+ sysCtx: sys,
}
}
@@ -70,7 +78,7 @@ func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
@@ -99,7 +107,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
- streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
+ streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
@@ -113,7 +121,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
if err != nil {
return types.BlobInfo{}, err
}
- _, err = streamCopy.Seek(0, os.SEEK_SET)
+ _, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
index ad0a3d2cb..80dd753e4 100644
--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go
@@ -46,7 +46,14 @@ type layerInfo struct {
// To do for both the NewSourceFromFile and NewSourceFromStream functions
// NewSourceFromFile returns a tarfile.Source for the specified path.
+// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp directory
+// for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromFile(path string) (*Source, error) {
+ return NewSourceFromFileWithContext(nil, path)
+}
+
+// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
+func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
@@ -65,16 +72,25 @@ func NewSourceFromFile(path string) (*Source, error) {
tarPath: path,
}, nil
}
- return NewSourceFromStream(stream)
+ return NewSourceFromStreamWithSystemContext(sys, stream)
}
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
+// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure
+// the temp directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
+ return NewSourceFromStreamWithSystemContext(nil, inputStream)
+}
+
+// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewSourceFromFile returns.
+func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
// FIXME: use SystemContext here.
// Save inputStream to a temporary file
- tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar")
+ tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
@@ -146,7 +162,7 @@ func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
- if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
index 23664a74a..d0bbbba8a 100644
--- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -48,8 +48,8 @@ func init() {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
- isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
- if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go
index 1a1c39d55..48ddb174e 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go
@@ -200,3 +200,8 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go
index 254c13f78..7891562b7 100644
--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go
@@ -355,3 +355,8 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
}
return json.Marshal(rawContents)
}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
index fe66da157..c574fa9fc 100644
--- a/vendor/github.com/containers/image/v5/image/manifest.go
+++ b/vendor/github.com/containers/image/v5/image/manifest.go
@@ -44,6 +44,8 @@ type genericManifest interface {
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+ // SupportsEncryption returns if encryption is supported for the manifest type
+ SupportsEncryption(ctx context.Context) bool
}
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
index 18a38d463..059d84977 100644
--- a/vendor/github.com/containers/image/v5/image/oci.go
+++ b/vendor/github.com/containers/image/v5/image/oci.go
@@ -212,3 +212,8 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
return memoryImageFromManifest(m1), nil
}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
index 4bf170156..91c64a1b8 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
@@ -5,9 +5,6 @@
// +build linux
// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-//
-// Deprecated: Most callers should use either golang.org/x/sys/unix directly,
-// or the original (and more extensive) github.com/jsipprell/keyctl .
package keyctl
import (
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
index 8c776929c..a3081f4f2 100644
--- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -3,6 +3,8 @@ package tmpdir
import (
"os"
"runtime"
+
+ "github.com/containers/image/v5/types"
)
// unixTempDirForBigFiles is the directory path to store big files on non Windows systems.
@@ -18,7 +20,10 @@ const builtinUnixTempDirForBigFiles = "/var/tmp"
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
// which on systemd based systems could be the unsuitable tmpfs filesystem.
-func TemporaryDirectoryForBigFiles() string {
+func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
+ if sys != nil && sys.BigFilesTemporaryDir != "" {
+ return sys.BigFilesTemporaryDir
+ }
var temporaryDirectoryForBigFiles string
if runtime.GOOS == "windows" {
temporaryDirectoryForBigFiles = os.TempDir()
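
`TemporaryDirectoryForBigFiles` lives in an internal package, so callers steer it through the new `SystemContext.BigFilesTemporaryDir` field and the `*WithContext` constructors added elsewhere in this bump. A minimal sketch; both paths below are placeholders:

```
package main

import (
	"log"

	"github.com/containers/image/v5/docker/tarfile"
	"github.com/containers/image/v5/types"
)

func main() {
	// Send temporary copies of big blobs to a scratch volume instead of the
	// built-in /var/tmp default.
	sys := &types.SystemContext{BigFilesTemporaryDir: "/mnt/scratch"}

	src, err := tarfile.NewSourceFromFileWithContext(sys, "/tmp/image.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
}
```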
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
index 6d10430fd..c7d741dc2 100644
--- a/vendor/github.com/containers/image/v5/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -66,9 +66,7 @@ func dupStringSlice(list []string) []string {
return nil
}
dup := make([]string, len(list))
- for i := range list {
- dup[i] = list[i]
- }
+ copy(dup, list)
return dup
}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
index 5b4d341d8..033b8d951 100644
--- a/vendor/github.com/containers/image/v5/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -206,6 +206,11 @@ func MIMETypeIsMultiImage(mimeType string) bool {
return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
}
+// MIMETypeSupportsEncryption returns true if the mimeType supports encryption
+func MIMETypeSupportsEncryption(mimeType string) bool {
+ return mimeType == imgspecv1.MediaTypeImageManifest
+}
+
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
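
A quick sketch of the new helper; in this release only the OCI image manifest media type reports encryption support:

```
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	fmt.Println(manifest.MIMETypeSupportsEncryption(imgspecv1.MediaTypeImageManifest))  // true
	fmt.Println(manifest.MIMETypeSupportsEncryption(manifest.DockerV2Schema2MediaType)) // false
}
```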
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 46c551b18..2d27d9433 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -3,9 +3,11 @@ package manifest
import (
"encoding/json"
"fmt"
+ "strings"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
+ ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -33,7 +35,7 @@ type OCI1 struct {
// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type.
func SupportedOCI1MediaType(m string) error {
switch m {
- case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader:
+ case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
return nil
default:
return fmt.Errorf("unsupported OCIv1 media type: %q", m)
@@ -117,7 +119,7 @@ func isOCI1Layer(mimeType string) bool {
}
}
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
@@ -125,11 +127,20 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
original := m.Layers
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
for i, info := range layerInfos {
+ mimeType := original[i].MediaType
// First make sure we support the media type of the original layer.
if err := SupportedOCI1MediaType(original[i].MediaType); err != nil {
return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
}
+ if info.CryptoOperation == types.Decrypt {
+ decMimeType, err := getDecryptedMediaType(mimeType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
+ }
+ mimeType = decMimeType
+ }
+
// Set the correct media types based on the specified compression
// operation, the desired compression algorithm AND the original media
// type.
@@ -142,31 +153,29 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
switch info.CompressionOperation {
case types.PreserveOriginal:
// Keep the original media type.
- m.Layers[i].MediaType = original[i].MediaType
+ m.Layers[i].MediaType = mimeType
case types.Decompress:
// Decompress the original media type and check if it was
// non-distributable one or not.
- mimeType := original[i].MediaType
switch {
case isOCI1NonDistributableLayer(mimeType):
m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
case isOCI1Layer(mimeType):
m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
+ return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", mimeType)
}
case types.Compress:
if info.CompressionAlgorithm == nil {
logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
- m.Layers[i].MediaType = original[i].MediaType
+ m.Layers[i].MediaType = mimeType
break
}
// Compress the original media type and set the new one based on
// that type (distributable or not) and the specified compression
// algorithm. Throw an error if the algorithm is not supported.
- mimeType := original[i].MediaType
switch info.CompressionAlgorithm.Name() {
case compression.Gzip.Name():
switch {
@@ -175,7 +184,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
case isOCI1Layer(mimeType):
m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+ return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
}
case compression.Zstd.Name():
@@ -185,7 +194,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
case isOCI1Layer(mimeType):
m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
default:
- return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
+ return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
}
default:
@@ -195,6 +204,15 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
default:
return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
}
+
+ if info.CryptoOperation == types.Encrypt {
+ encMediaType, err := getEncryptedMediaType(m.Layers[i].MediaType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", m.Layers[i].MediaType)
+ }
+ m.Layers[i].MediaType = encMediaType
+ }
+
m.Layers[i].Digest = info.Digest
m.Layers[i].Size = info.Size
m.Layers[i].Annotations = info.Annotations
@@ -220,7 +238,9 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
return nil, err
}
d1 := &Schema2V1Image{}
- json.Unmarshal(config, d1)
+ if err := json.Unmarshal(config, d1); err != nil {
+ return nil, err
+ }
i := &types.ImageInspectInfo{
Tag: "",
Created: v1.Created,
@@ -241,3 +261,30 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
}
return m.Config.Digest.Hex(), nil
}
+
+// getEncryptedMediaType maps the mediatype to its encrypted counterpart and returns
+// an error if the mediatype does not support encryption
+func getEncryptedMediaType(mediatype string) (string, error) {
+ for _, s := range strings.Split(mediatype, "+")[1:] {
+ if s == "encrypted" {
+ return "", errors.Errorf("unsupported mediatype: %v is already encrypted", mediatype)
+ }
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType maps the mediatype to its decrypted counterpart and returns
+// an error if the mediatype does not support decryption
+func getDecryptedMediaType(mediatype string) (string, error) {
+ if !strings.HasSuffix(mediatype, "+encrypted") {
+ return "", errors.Errorf("unsupported mediatype to decrypt: %v", mediatype)
+ }
+
+ return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
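
To see the media-type rewriting end to end, here is a sketch that builds a one-layer OCI manifest in memory and asks `UpdateLayerInfos` to mark the layer for encryption; the digests and sizes are synthetic and exist only for the example:

```
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A tiny in-memory manifest; real manifests come from an image source.
	m := manifest.OCI1FromComponents(imgspecv1.Descriptor{
		MediaType: imgspecv1.MediaTypeImageConfig,
		Digest:    digest.FromString("config"),
		Size:      6,
	}, []imgspecv1.Descriptor{{
		MediaType: imgspecv1.MediaTypeImageLayerGzip,
		Digest:    digest.FromString("layer"),
		Size:      5,
	}})

	// Requesting types.Encrypt makes UpdateLayerInfos append the "+encrypted"
	// suffix via getEncryptedMediaType.
	if err := m.UpdateLayerInfos([]types.BlobInfo{{
		Digest:               digest.FromString("layer"),
		Size:                 5,
		CompressionOperation: types.PreserveOriginal,
		CryptoOperation:      types.Encrypt,
	}}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Layers[0].MediaType) // application/vnd.oci.image.layer.v1.tar+gzip+encrypted
}
```

Requesting `types.Decrypt` walks the same path in reverse, stripping the `+encrypted` suffix via `getDecryptedMediaType`.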
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 164d5522d..0509eaa83 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
type ociArchiveImageDestination struct {
@@ -19,7 +20,7 @@ type ociArchiveImageDestination struct {
// newImageDestination returns an ImageDestination for writing to an existing directory.
func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
- tempDirRef, err := createOCIRef(ref.image)
+ tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
return nil, errors.Wrapf(err, "error creating oci reference")
}
@@ -43,7 +44,10 @@ func (d *ociArchiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any
// Close deletes the temp directory of the oci-archive image
func (d *ociArchiveImageDestination) Close() error {
- defer d.tempDirRef.deleteTempDir()
+ defer func() {
+ err := d.tempDirRef.deleteTempDir()
+ logrus.Debugf("Error deleting temporary directory: %v", err)
+ }()
return d.unpackedDest.Close()
}
@@ -66,7 +70,7 @@ func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
return d.unpackedDest.AcceptsForeignLayerURLs()
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise
func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
return d.unpackedDest.MustMatchRuntimeOS()
}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
index 33a41d44b..8f07b3307 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -9,6 +9,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
type ociArchiveImageSource struct {
@@ -20,7 +21,7 @@ type ociArchiveImageSource struct {
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource untars the file and saves it in a temp directory
func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
- tempDirRef, err := createUntarTempDir(ref)
+ tempDirRef, err := createUntarTempDir(sys, ref)
if err != nil {
return nil, errors.Wrap(err, "error creating temp directory")
}
@@ -38,16 +39,25 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
}
// LoadManifestDescriptor loads the manifest
+// Deprecated: use LoadManifestDescriptorWithContext instead
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+ return LoadManifestDescriptorWithContext(nil, imgRef)
+}
+
+// LoadManifestDescriptorWithContext loads the manifest
+func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
ociArchRef, ok := imgRef.(ociArchiveReference)
if !ok {
return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
}
- tempDirRef, err := createUntarTempDir(ociArchRef)
+ tempDirRef, err := createUntarTempDir(sys, ociArchRef)
if err != nil {
return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
}
- defer tempDirRef.deleteTempDir()
+ defer func() {
+ err := tempDirRef.deleteTempDir()
+ logrus.Debugf("Error deleting temporary directory: %v", err)
+ }()
descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
if err != nil {
@@ -64,7 +74,10 @@ func (s *ociArchiveImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
// Close deletes the temporary directory at dst
func (s *ociArchiveImageSource) Close() error {
- defer s.tempDirRef.deleteTempDir()
+ defer func() {
+ err := s.tempDirRef.deleteTempDir()
+ logrus.Debugf("error deleting tmp dir: %v", err)
+ }()
return s.unpackedSrc.Close()
}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
index 2d72a6fee..3033b4a27 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -96,7 +96,7 @@ func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
// same image and the two can’t be statically disambiguated. Using at least the repository directory is
// less granular but hopefully still useful.
- return fmt.Sprintf("%s", ref.resolvedFile)
+ return ref.resolvedFile
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
@@ -159,8 +159,9 @@ func (t *tempDirOCIRef) deleteTempDir() error {
}
// createOCIRef creates the oci reference of the image
-func createOCIRef(image string) (tempDirOCIRef, error) {
- dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
+// If SystemContext.BigFilesTemporaryDir is not "", it overrides the temporary directory to use for storing big files
+func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
+ dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
if err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
}
@@ -174,8 +175,8 @@ func createOCIRef(image string) (tempDirOCIRef, error) {
}
// creates the temporary directory and copies the tarred content to it
-func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
- tempDirRef, err := createOCIRef(ref.image)
+func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
+ tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index 370e8d2cd..fb0449ca5 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -97,7 +97,7 @@ func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *ociImageDestination) MustMatchRuntimeOS() bool {
return false
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index c662c9a7a..a99b63158 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -124,7 +124,7 @@ func (ref ociReference) PolicyConfigurationIdentity() string {
// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
// same image and the two can’t be statically disambiguated. Using at least the repository directory is
// less granular but hopefully still useful.
- return fmt.Sprintf("%s", ref.resolvedDir)
+ return ref.resolvedDir
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index f45dc24c4..585b75069 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -19,6 +19,7 @@ import (
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"k8s.io/client-go/util/homedir"
)
@@ -137,9 +138,8 @@ func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, e
return nil, err
}
- var mergedClientConfig clientConfig
// REMOVED: Interactive fallback support.
- mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig)
+ mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
config.clientConfig = mergedClientConfig
}
@@ -210,13 +210,17 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if err != nil {
return nil, err
}
- mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
+ if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
+ return nil, err
+ }
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
if err != nil {
return nil, err
}
- mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
+ if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil {
+ return nil, err
+ }
}
return clientConfig, nil
@@ -237,7 +241,9 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
configClientConfig.CAFile = configClusterInfo.CertificateAuthority
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
- mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
+ if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
+ return nil, err
+ }
return mergedConfig, nil
}
@@ -272,14 +278,6 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest
return mergedConfig, nil
}
-// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
-func canIdentifyUser(config restConfig) bool {
- return len(config.Username) > 0 ||
- (len(config.CertFile) > 0 || len(config.CertData) > 0) ||
- len(config.BearerToken) > 0
-
-}
-
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
@@ -320,7 +318,9 @@ func (config *directClientConfig) getContext() clientcmdContext {
var mergedContext clientcmdContext
if configContext, exists := contexts[contextName]; exists {
- mergo.MergeWithOverwrite(&mergedContext, configContext)
+ if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil {
+ logrus.Debugf("Can't merge configContext: %v", err)
+ }
}
// REMOVED: overrides support
@@ -333,6 +333,17 @@ var (
errEmptyCluster = errors.New("cluster has no server defined")
)
+// validateFileIsReadable is a helper for checking that a certificate/key/CA file can be opened
+func validateFileIsReadable(name string) error {
+ answer, err := os.Open(name)
+ defer func() {
+ if err := answer.Close(); err != nil {
+ logrus.Debugf("Error closing %v: %v", name, err)
+ }
+ }()
+ return err
+}
+
// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo.
// validateClusterInfo looks for conflicts and errors in the cluster info
func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error {
@@ -354,8 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
- clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
- defer clientCertCA.Close()
+ err := validateFileIsReadable(clusterInfo.CertificateAuthority)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
}
@@ -393,15 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
}
if len(authInfo.ClientCertificate) != 0 {
- clientCertFile, err := os.Open(authInfo.ClientCertificate)
- defer clientCertFile.Close()
+ err := validateFileIsReadable(authInfo.ClientCertificate)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
- clientKeyFile, err := os.Open(authInfo.ClientKey)
- defer clientKeyFile.Close()
+ err := validateFileIsReadable(authInfo.ClientKey)
if err != nil {
validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
}
@@ -423,7 +431,9 @@ func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo {
var mergedAuthInfo clientcmdAuthInfo
if configAuthInfo, exists := authInfos[authInfoName]; exists {
- mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo)
+ if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil {
+ logrus.Debugf("Can't merge configAuthInfo: %v", err)
+ }
}
// REMOVED: overrides support
@@ -436,10 +446,16 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
clusterInfoName := config.getClusterName()
var mergedClusterInfo clientcmdCluster
- mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster)
- mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster)
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil {
+ logrus.Debugf("Can't merge defaultCluster: %v", err)
+ }
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil {
+ logrus.Debugf("Can't merge envVarCluster: %v", err)
+ }
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
- mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo)
+ if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil {
+ logrus.Debugf("Can't merge configClusterInfo: %v", err)
+ }
}
// REMOVED: overrides support
@@ -573,7 +589,9 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
// first merge all of our maps
mapConfig := clientcmdNewConfig()
for _, kubeconfig := range kubeconfigs {
- mergo.MergeWithOverwrite(mapConfig, kubeconfig)
+ if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// merge all of the struct values in the reverse order so that priority is given correctly
@@ -581,14 +599,20 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
nonMapConfig := clientcmdNewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
- mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
+ if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
// get the values we expect.
config := clientcmdNewConfig()
- mergo.MergeWithOverwrite(config, mapConfig)
- mergo.MergeWithOverwrite(config, nonMapConfig)
+ if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil {
+ return nil, err
+ }
+ if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil {
+ return nil, err
+ }
// REMOVED: Possibility to skip this.
if err := resolveLocalPaths(config); err != nil {
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 016de4803..b9242da6e 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -378,7 +378,7 @@ func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
return false
}
@@ -491,6 +491,9 @@ sigExists:
Content: newSig,
}
body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
_, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
if err != nil {
return err
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index c442b4d2e..115097055 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -120,7 +120,7 @@ func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
return true
}
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index ff802cefd..60d67dfdc 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -275,7 +275,10 @@ func (config *V2RegistriesConf) postProcess() error {
// Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps.
for _, reg := range config.Registries {
- others, _ := regMap[reg.Location]
+ others, ok := regMap[reg.Location]
+ if !ok {
+ return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing")
+ }
for _, other := range others {
if reg.Insecure != other.Insecure {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index 6785564e8..7e2142b1f 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -99,14 +99,13 @@ func NewTransport() *http.Transport {
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
- Dial: direct.Dial,
+ DialContext: direct.DialContext,
TLSHandshakeTimeout: 10 * time.Second,
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
}
- proxyDialer, err := sockets.DialerFromEnvironment(direct)
- if err == nil {
- tr.Dial = proxyDialer.Dial
+ if _, err := sockets.DialerFromEnvironment(direct); err != nil {
+ logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
}
return tr
}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
index e94de2a9c..a1fb1eebb 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_eval.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -85,7 +85,6 @@ type PolicyContext struct {
type policyContextState string
const (
- pcInvalid policyContextState = ""
pcInitializing policyContextState = "Initializing"
pcReady policyContextState = "Ready"
pcInUse policyContextState = "InUse"
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
index 44e70b3b9..bc1c0e575 100644
--- a/vendor/github.com/containers/image/v5/signature/signature.go
+++ b/vendor/github.com/containers/image/v5/signature/signature.go
@@ -111,8 +111,8 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
- if _, ok := err.(jsonFormatError); ok {
- err = InvalidSignatureError{msg: err.Error()}
+ if formatErr, ok := err.(jsonFormatError); ok {
+ err = InvalidSignatureError{msg: formatErr.Error()}
}
}
return err
diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go
index 2b89f329f..df4b67c7a 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_image.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_image.go
@@ -147,7 +147,8 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
- layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+ layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+
// If it's not a layer, then it must be a data item.
if len(layers) == 0 {
b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
@@ -341,8 +342,8 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
// it's time to Commit() the image
-func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
- directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
if err != nil {
return nil, errors.Wrapf(err, "error creating a temporary directory")
}
@@ -930,7 +931,7 @@ func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (s *storageImageDestination) MustMatchRuntimeOS() bool {
return true
}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 4e137ad1b..5199fb535 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -93,6 +93,9 @@ func imageMatchesSystemContext(store storage.Store, img *storage.Image, manifest
}
// Load the image's configuration blob.
m, err := manifest.FromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
getConfig := func(blobInfo types.BlobInfo) ([]byte, error) {
return store.ImageBigData(img.ID, blobInfo.Digest.String())
}
@@ -295,5 +298,5 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(s)
+ return newImageDestination(sys, s)
}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 2db8c7827..d38a17fab 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -104,6 +104,19 @@ const (
Compress
)
+// LayerCrypto indicates whether layers have been encrypted or decrypted, or neither
+type LayerCrypto int
+
+const (
+ // PreserveOriginalCrypto indicates the layer must be preserved, i.e.
+ // no encryption/decryption
+ PreserveOriginalCrypto LayerCrypto = iota
+ // Encrypt indicates the layer is encrypted
+ Encrypt
+ // Decrypt indicates the layer is decrypted
+ Decrypt
+)
+
// BlobInfo collects known information about a blob (layer/config).
// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
type BlobInfo struct {
@@ -115,11 +128,18 @@ type BlobInfo struct {
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer should be preserved or (de)compressed. The
// field defaults to preserve the original layer.
+ // TODO: To remove together with CryptoOperation in a re-design that
+ // moves this field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
// set when `CompressionOperation == Compress`.
CompressionAlgorithm *compression.Algorithm
+ // CryptoOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer should be encrypted or decrypted
+ // TODO: To remove together with CompressionOperation in a re-design that
+ // moves this field out of BlobInfo.
+ CryptoOperation LayerCrypto
}
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
@@ -264,7 +284,7 @@ type ImageDestination interface {
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
AcceptsForeignLayerURLs() bool
- // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
MustMatchRuntimeOS() bool
// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
@@ -378,6 +398,8 @@ type Image interface {
// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
// This does not change the state of the original Image object.
UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+ // SupportsEncryption returns whether the image's manifest type supports encryption
+ SupportsEncryption(ctx context.Context) bool
// Size returns an approximation of the amount of disk space which is consumed by the image in its current
// location. If the size is not known, -1 will be returned.
Size() (int64, error)
@@ -448,7 +470,7 @@ const (
// OptionalBoolFalse. The function is meant to avoid boilerplate code of users.
func NewOptionalBool(b bool) OptionalBool {
o := OptionalBoolFalse
- if b == true {
+ if b {
o = OptionalBoolTrue
}
return o
@@ -490,9 +512,10 @@ type SystemContext struct {
OSChoice string
// If not "", overrides the system's default directory containing a blob info cache.
BlobInfoCacheDir string
-
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
+ // If not "", overrides the temporary directory to use for storing big files
+ BigFilesTemporaryDir string
// === OCI.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
@@ -547,9 +570,37 @@ type SystemContext struct {
CompressionLevel *int
}
+// ProgressEvent is the type of events a progress reader can produce
+// Warning: new event types may be added any time.
+type ProgressEvent uint
+
+const (
+ // ProgressEventNewArtifact will be fired on progress reader setup
+ ProgressEventNewArtifact ProgressEvent = iota
+
+ // ProgressEventRead indicates that the artifact download is currently in
+ // progress
+ ProgressEventRead
+
+ // ProgressEventDone is fired when the data transfer has been finished for
+ // the specific artifact
+ ProgressEventDone
+)
+
// ProgressProperties is used to pass information from the copy code to a monitor which
// can use the real-time information to produce output or react to changes.
type ProgressProperties struct {
+ // The event indicating what happened (new artifact, read progress, or done)
+ Event ProgressEvent
+
+ // The artifact which has been updated in this interval
Artifact BlobInfo
- Offset uint64
+
+ // The currently downloaded size in bytes
+ // Increases from 0 to the final Artifact size
+ Offset uint64
+
+ // The additional offset which has been downloaded inside the last update
+ // interval. Will be reset after each ProgressEventRead event.
+ OffsetUpdate uint64
}
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 572be2b89..a92774466 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,7 +6,7 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 0
+ VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS
new file mode 100644
index 000000000..e6a7d1f0a
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -0,0 +1,5 @@
+# ocicrypt maintainers
+#
+# Github ID, Name, Email Address
+lumjjb, Brandon Lum, lumjjb@gmail.com
+stefanberger, Stefan Berger, stefanb@linux.ibm.com
diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile
new file mode 100644
index 000000000..49fa80d74
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/Makefile
@@ -0,0 +1,31 @@
+# Copyright The containerd Authors.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: check build decoder
+
+all: build
+
+FORCE:
+
+check:
+ golangci-lint run
+
+build: vendor
+ go build ./...
+
+vendor:
+ go mod tidy
+
+test:
+ go test ./...
diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md
new file mode 100644
index 000000000..ec5ae5b35
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/README.md
@@ -0,0 +1,32 @@
+# OCIcrypt Library
+
+The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as provide a consistent implementation of image encryption across container runtimes and build tools.
+
+## Usage
+
+There are various levels of usage for this library. The main consumers would be runtime/build tools, and a more specific use would be the ability to extend its cryptographic functionality.
+
+### Runtime/Build tool usage
+
+The general interface a runtime/build tool would use is to perform encryption or decryption of layers:
+
+```
+package "github.com/containers/ocicrypt"
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
+```
+
+The settings/parameters for these functions can be specified by creating an encryption config with the `github.com/containers/ocicrypt/config` package. Because annotations and other fields of the layer descriptor are set through various means in different runtimes/build tools, it remains the responsibility of the caller to ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).
+
+
+### Crypto Agility and Extensibility
+
+The implementations of both symmetric and asymmetric encryption used in this library sit behind two main interfaces, which users can extend if need be. These are in the following packages:
+- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
+- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
+
+Note that extending these interfaces outside of the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security-related issues by emailing the maintainers listed in the [MAINTAINERS](MAINTAINERS) file.
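A minimal end-to-end sketch of the runtime/build-tool usage described in the README above, assuming a recipient JWE public key in `pubkey.pem` and a plain layer blob in `layer.tar.gz` (both file names are hypothetical):

```go
package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical inputs: a recipient's public key and a plain layer blob.
	pubKey, err := ioutil.ReadFile("pubkey.pem")
	if err != nil {
		log.Fatal(err)
	}
	cc, err := config.EncryptWithJwe([][]byte{pubKey})
	if err != nil {
		log.Fatal(err)
	}

	plain, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer plain.Close()

	// In a real caller the descriptor comes from the image manifest.
	desc := ocispec.Descriptor{}

	encReader, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, plain, desc)
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("layer.tar.gz.enc")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, encReader); err != nil {
		log.Fatal(err)
	}

	// The finalizer must run after the ciphertext has been written; it returns
	// the annotations to attach to the encrypted layer's descriptor.
	annotations, err := finalizer()
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range annotations {
		log.Printf("%s=%s", k, v)
	}
}
```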
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 000000000..da403d95d
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,160 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package blockcipher
+
+import (
+ "io"
+
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// LayerCipherType is the ciphertype as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+ AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is sensitive and should not be in plaintext
+type PrivateLayerBlockCipherOptions struct {
+ // SymmetricKey represents the symmetric key used for encryption/decryption
+ // This field should be populated by Encrypt/Decrypt calls
+ SymmetricKey []byte `json:"symkey"`
+
+ // Digest is the digest of the original data for verification.
+ // This is NOT populated by Encrypt/Decrypt calls
+ Digest digest.Digest `json:"digest"`
+
+ // CipherOptions contains the cipher metadata used for encryption/decryption
+ // This field should be populated by Encrypt/Decrypt calls
+ CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+ // CipherType denotes the cipher type according to the list of OCI supported
+ // cipher types.
+ CipherType LayerCipherType `json:"cipher"`
+
+ // Hmac contains the hmac string to help verify encryption
+ Hmac []byte `json:"hmac"`
+
+ // CipherOptions contains the cipher metadata used for encryption/decryption
+ // This field should be populated by Encrypt/Decrypt calls
+ CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+ Public PublicLayerBlockCipherOptions
+ Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher provides encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+ // GenerateKey creates a symmetric key
+ GenerateKey() ([]byte, error)
+ // Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+ Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+ // Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+ Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+ cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and if the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+ if v, ok := lbco.Public.CipherOptions[key]; ok {
+ return v, ok
+ } else if v, ok := lbco.Private.CipherOptions[key]; ok {
+ return v, ok
+ } else {
+ return nil, false
+ }
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+ return func() (LayerBlockCipherOptions, error) {
+ lbco, err := fin()
+ if err != nil {
+ return LayerBlockCipherOptions{}, err
+ }
+ lbco.Public.CipherType = typ
+ return lbco, err
+ }
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+ if c, ok := h.cipherMap[typ]; ok {
+ sk, err := c.GenerateKey()
+ if err != nil {
+ return nil, nil, err
+ }
+ opt := LayerBlockCipherOptions{
+ Private: PrivateLayerBlockCipherOptions{
+ SymmetricKey: sk,
+ },
+ }
+ encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+ if err == nil {
+ fin = wrapFinalizerWithType(fin, typ)
+ }
+ return encDataReader, fin, err
+ }
+ return nil, nil, errors.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+ typ := opt.Public.CipherType
+ if typ == "" {
+ return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+ }
+ if c, ok := h.cipherMap[LayerCipherType(typ)]; ok {
+ return c.Decrypt(encDataReader, opt)
+ }
+ return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+ h := LayerBlockCipherHandler{
+ cipherMap: map[LayerCipherType]LayerBlockCipher{},
+ }
+
+ var err error
+ h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR")
+ }
+
+ return &h, nil
+}
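A minimal round-trip sketch of the handler above. The finalizer may only be called once the ciphertext stream has been fully drained, and the options it returns (key, nonce, HMAC, cipher type) are what Decrypt later consumes:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt/blockcipher"
)

func main() {
	h, err := blockcipher.NewLayerBlockCipherHandler()
	if err != nil {
		log.Fatal(err)
	}

	plain := []byte("layer contents")
	encReader, fin, err := h.Encrypt(bytes.NewReader(plain), blockcipher.AES256CTR)
	if err != nil {
		log.Fatal(err)
	}
	ciphertext, err := ioutil.ReadAll(encReader) // drain before calling the finalizer
	if err != nil {
		log.Fatal(err)
	}

	// The finalizer returns the symmetric key, nonce, HMAC and cipher type
	// needed to decrypt the blob later.
	opts, err := fin()
	if err != nil {
		log.Fatal(err)
	}

	decReader, _, err := h.Decrypt(bytes.NewReader(ciphertext), opts)
	if err != nil {
		log.Fatal(err)
	}
	roundTrip, err := ioutil.ReadAll(decReader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(plain, roundTrip)) // true
}
```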
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 000000000..095a53e35
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package blockcipher
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "io"
+
+ "github.com/containers/ocicrypt/utils"
+ "github.com/pkg/errors"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+ keylen int // in bytes
+ reader io.Reader
+ encrypt bool
+ stream cipher.Stream
+ err error
+ hmac hash.Hash
+ expHmac []byte
+ doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+ bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; currently only 256 bits is supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+ if bits != 256 {
+ return nil, errors.New("AES CTR bit count not supported")
+ }
+ return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+ var (
+ o int
+ )
+
+ if r.bc.err != nil {
+ return 0, r.bc.err
+ }
+
+ o, err := utils.FillBuffer(r.bc.reader, p)
+ if err != nil {
+ if err == io.EOF {
+ r.bc.err = err
+ } else {
+ return 0, err
+ }
+ }
+
+ if !r.bc.encrypt {
+ if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+ r.bc.err = errors.Wrapf(err, "could not write to hmac")
+ return 0, r.bc.err
+ }
+
+ if r.bc.err == io.EOF {
+ // Before we return EOF we let the HMAC comparison
+ // provide a verdict
+ if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+ r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+ return 0, r.bc.err
+ }
+ }
+ }
+
+ r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+ if r.bc.encrypt {
+ if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+ r.bc.err = errors.Wrapf(err, "could not write to hmac")
+ return 0, r.bc.err
+ }
+
+ if r.bc.err == io.EOF {
+ // Final data encrypted; Do the 'then-MAC' part
+ r.bc.doneEncrypting = true
+ }
+ }
+
+ return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+ var (
+ err error
+ )
+
+ key := opts.Private.SymmetricKey
+ if len(key) != bc.keylen {
+ return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+ }
+
+ nonce, ok := opts.GetOpt("nonce")
+ if !ok {
+ nonce = make([]byte, aes.BlockSize)
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce")
+ }
+ }
+
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed")
+ }
+
+ bc.reader = reader
+ bc.encrypt = encrypt
+ bc.stream = cipher.NewCTR(block, nonce)
+ bc.err = nil
+ bc.hmac = hmac.New(sha256.New, key)
+ bc.expHmac = opts.Public.Hmac
+ bc.doneEncrypting = false
+
+ if !encrypt && len(bc.expHmac) == 0 {
+ return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+ }
+
+ lbco := LayerBlockCipherOptions{
+ Private: PrivateLayerBlockCipherOptions{
+ SymmetricKey: key,
+ CipherOptions: map[string][]byte{
+ "nonce": nonce,
+ },
+ },
+ }
+
+ return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+ key := make([]byte, bc.keylen)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ return nil, err
+ }
+ return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+ lbco, err := bc.init(true, plainDataReader, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ finalizer := func() (LayerBlockCipherOptions, error) {
+ if !bc.doneEncrypting {
+ return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+ }
+ if lbco.Public.CipherOptions == nil {
+ lbco.Public.CipherOptions = map[string][]byte{}
+ }
+ lbco.Public.Hmac = bc.hmac.Sum(nil)
+ return lbco, nil
+ }
+ return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+ lbco, err := bc.init(false, encDataReader, opt)
+ if err != nil {
+ return nil, LayerBlockCipherOptions{}, err
+ }
+
+ return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
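For completeness, a sketch of using the AES-CTR cipher directly rather than through the handler; it shows where the key, nonce, and HMAC end up in the returned options (the handler, not the cipher, is what fills in Public.CipherType):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt/blockcipher"
)

func main() {
	c, err := blockcipher.NewAESCTRLayerBlockCipher(256)
	if err != nil {
		log.Fatal(err)
	}
	key, err := c.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}

	opt := blockcipher.LayerBlockCipherOptions{
		Private: blockcipher.PrivateLayerBlockCipherOptions{SymmetricKey: key},
	}
	encReader, fin, err := c.Encrypt(bytes.NewReader([]byte("layer contents")), opt)
	if err != nil {
		log.Fatal(err)
	}
	ciphertext, err := ioutil.ReadAll(encReader) // the stream must be drained before finalizing
	if err != nil {
		log.Fatal(err)
	}

	lbco, err := fin()
	if err != nil {
		log.Fatal(err)
	}
	nonce, _ := lbco.GetOpt("nonce")
	fmt.Printf("ciphertext: %d bytes, nonce: %d bytes, hmac: %x\n",
		len(ciphertext), len(nonce), lbco.Public.Hmac)
	// Note: when the cipher is used directly, the finalizer does not set
	// lbco.Public.CipherType; the handler's wrapper does that.
}
```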
diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 000000000..d960766eb
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package config
+
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
+type EncryptConfig struct {
+ // map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+ Parameters map[string][][]byte
+
+ DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+ // map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+ Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+ EncryptConfig *EncryptConfig
+ DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+ return CryptoConfig{
+ DecryptConfig: &DecryptConfig{
+ Parameters: dcparameters,
+ },
+ }
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: parameters,
+ DecryptConfig: DecryptConfig{
+ Parameters: dcparameters,
+ },
+ },
+ }
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+ ecparam := map[string][][]byte{}
+ ecdcparam := map[string][][]byte{}
+ dcparam := map[string][][]byte{}
+
+ for _, cc := range ccs {
+ if ec := cc.EncryptConfig; ec != nil {
+ addToMap(ecparam, ec.Parameters)
+ addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+ }
+
+ if dc := cc.DecryptConfig; dc != nil {
+ addToMap(dcparam, dc.Parameters)
+ }
+ }
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ecparam,
+ DecryptConfig: DecryptConfig{
+ Parameters: ecdcparam,
+ },
+ },
+ DecryptConfig: &DecryptConfig{
+ Parameters: dcparam,
+ },
+ }
+
+}
+
+// AttachDecryptConfig adds a DecryptConfig to the EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+ if dc != nil {
+ addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+ }
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+ for k, v := range add {
+ if ov, ok := orig[k]; ok {
+ orig[k] = append(ov, v...)
+ } else {
+ orig[k] = v
+ }
+ }
+}
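A small sketch of how the parameter maps above are assembled into a single CryptoConfig; the key names ("pubkeys", "privkeys", "privkeys-passwords") follow the comments in this file, and the helper name is hypothetical:

```go
package ocicryptexample // hypothetical example package

import "github.com/containers/ocicrypt/config"

// cryptoConfigFromRawKeys builds a CryptoConfig directly from parameter maps.
func cryptoConfigFromRawKeys(pubKeys, privKeys, privKeyPasswords [][]byte) config.CryptoConfig {
	encParams := map[string][][]byte{
		"pubkeys": pubKeys,
	}
	decParams := map[string][][]byte{
		"privkeys":           privKeys,
		"privkeys-passwords": privKeyPasswords,
	}

	encCC := config.InitEncryption(encParams, map[string][][]byte{})
	decCC := config.InitDecryption(decParams)

	combined := config.CombineCryptoConfigs([]config.CryptoConfig{encCC, decCC})

	// Let the encryption path reuse the decryption parameters, e.g. when adding
	// a recipient to a layer that is already encrypted.
	combined.EncryptConfig.AttachDecryptConfig(combined.DecryptConfig)
	return combined
}
```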
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 000000000..44adcdb35
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,134 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package config
+
+import (
+ "github.com/pkg/errors"
+)
+
+// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
+func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) {
+ dc := DecryptConfig{}
+ ep := map[string][][]byte{
+ "pubkeys": pubKeys,
+ }
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
+
+// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs
+func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) {
+ dc := DecryptConfig{}
+
+ ep := map[string][][]byte{
+ "x509s": x509s,
+ }
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
+
+// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters
+func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) {
+ dc := DecryptConfig{}
+ ep := map[string][][]byte{
+ "gpg-recipients": gpgRecipients,
+ "gpg-pubkeyringfile": {gpgPubRingFile},
+ }
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
+
+// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys
+func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) {
+ if len(privKeys) != len(privKeysPasswords) {
+ return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords")
+ }
+
+ dc := DecryptConfig{
+ Parameters: map[string][][]byte{
+ "privkeys": privKeys,
+ "privkeys-passwords": privKeysPasswords,
+ },
+ }
+
+ ep := map[string][][]byte{}
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
+
+// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs
+func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) {
+ dc := DecryptConfig{
+ Parameters: map[string][][]byte{
+ "x509s": x509s,
+ },
+ }
+
+ ep := map[string][][]byte{}
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
+
+// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys
+func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) {
+ dc := DecryptConfig{
+ Parameters: map[string][][]byte{
+ "gpg-privatekeys": gpgPrivKeys,
+ "gpg-privatekeys-passwords": gpgPrivKeysPwds,
+ },
+ }
+
+ ep := map[string][][]byte{}
+
+ return CryptoConfig{
+ EncryptConfig: &EncryptConfig{
+ Parameters: ep,
+ DecryptConfig: dc,
+ },
+ DecryptConfig: &dc,
+ }, nil
+}
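A short sketch of the GPG constructors; `pubring.gpg`, `secret.gpg`, the recipient name, and the passphrase are placeholders, and DecryptWithGpgPrivKeys expects the key and password slices to be index-aligned:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt/config"
)

func main() {
	// Hypothetical exported GPG keyring files.
	pubRing, err := ioutil.ReadFile("pubring.gpg")
	if err != nil {
		log.Fatal(err)
	}
	encCC, err := config.EncryptWithGpg([][]byte{[]byte("alice@example.com")}, pubRing)
	if err != nil {
		log.Fatal(err)
	}
	_ = encCC.EncryptConfig // handed to ocicrypt.EncryptLayer

	secKey, err := ioutil.ReadFile("secret.gpg")
	if err != nil {
		log.Fatal(err)
	}
	// Private keys and their passwords are index-aligned slices.
	decCC, err := config.DecryptWithGpgPrivKeys([][]byte{secKey}, [][]byte{[]byte("passphrase")})
	if err != nil {
		log.Fatal(err)
	}
	_ = decCC.DecryptConfig // handed to ocicrypt.DecryptLayer
}
```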
diff --git a/vendor/github.com/containers/ocicrypt/encryption.go b/vendor/github.com/containers/ocicrypt/encryption.go
new file mode 100644
index 000000000..139ff5f93
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/encryption.go
@@ -0,0 +1,325 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "strings"
+
+ "github.com/containers/ocicrypt/blockcipher"
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/keywrap/jwe"
+ "github.com/containers/ocicrypt/keywrap/pgp"
+ "github.com/containers/ocicrypt/keywrap/pkcs7"
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
+// the encrypted layer
+type EncryptLayerFinalizer func() (map[string]string, error)
+
+func init() {
+ keyWrappers = make(map[string]keywrap.KeyWrapper)
+ keyWrapperAnnotations = make(map[string]string)
+ RegisterKeyWrapper("pgp", pgp.NewKeyWrapper())
+ RegisterKeyWrapper("jwe", jwe.NewKeyWrapper())
+ RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper())
+}
+
+var keyWrappers map[string]keywrap.KeyWrapper
+var keyWrapperAnnotations map[string]string
+
+// RegisterKeyWrapper registers a key wrapper under its encryption scheme
+func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) {
+ keyWrappers[scheme] = iface
+ keyWrapperAnnotations[iface.GetAnnotationID()] = scheme
+}
+
+// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe)
+func GetKeyWrapper(scheme string) keywrap.KeyWrapper {
+ return keyWrappers[scheme]
+}
+
+// GetWrappedKeysMap returns a map of wrappedKeys as values in a
+// map with the encryption scheme(s) as the key(s)
+func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string {
+ wrappedKeysMap := make(map[string]string)
+
+ for annotationsID, scheme := range keyWrapperAnnotations {
+ if annotation, ok := desc.Annotations[annotationsID]; ok {
+ wrappedKeysMap[scheme] = annotation
+ }
+ }
+ return wrappedKeysMap
+}
+
+// EncryptLayer encrypts the layer by running one encryptor after the other
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) {
+ var (
+ encLayerReader io.Reader
+ err error
+ encrypted bool
+ bcFin blockcipher.Finalizer
+ privOptsData []byte
+ pubOptsData []byte
+ )
+
+ if ec == nil {
+ return nil, nil, errors.New("EncryptConfig must not be nil")
+ }
+
+ for annotationsID := range keyWrapperAnnotations {
+ annotation := desc.Annotations[annotationsID]
+ if annotation != "" {
+ privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc)
+ if err != nil {
+ return nil, nil, err
+ }
+ pubOptsData, err = getLayerPubOpts(desc)
+ if err != nil {
+ return nil, nil, err
+ }
+ // already encrypted!
+ encrypted = true
+ }
+ }
+
+ if !encrypted {
+ encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ encLayerFinalizer := func() (map[string]string, error) {
+ // If layer was already encrypted, bcFin should be nil, use existing optsData
+ if bcFin != nil {
+ opts, err := bcFin()
+ if err != nil {
+ return nil, err
+ }
+ privOptsData, err = json.Marshal(opts.Private)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not JSON marshal opts")
+ }
+ pubOptsData, err = json.Marshal(opts.Public)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not JSON marshal opts")
+ }
+ }
+
+ newAnnotations := make(map[string]string)
+ for annotationsID, scheme := range keyWrapperAnnotations {
+ b64Annotations := desc.Annotations[annotationsID]
+ keywrapper := GetKeyWrapper(scheme)
+ b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData)
+ if err != nil {
+ return nil, err
+ }
+ if b64Annotations != "" {
+ newAnnotations[annotationsID] = b64Annotations
+ }
+ }
+
+ newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData)
+
+ if len(newAnnotations) == 0 {
+ return nil, errors.New("no encryptor found to handle encryption")
+ }
+
+ return newAnnotations, err
+ }
+
+ // if nothing was encrypted, we just return encLayer = nil
+ return encLayerReader, encLayerFinalizer, err
+
+}
+
+// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the
+// annotation data
+func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) {
+ newAnnotation, err := keywrapper.WrapKeys(ec, optsData)
+ if err != nil || len(newAnnotation) == 0 {
+ return b64Annotations, err
+ }
+ b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation)
+ if b64Annotations == "" {
+ return b64newAnnotation, nil
+ }
+ return b64Annotations + "," + b64newAnnotation, nil
+}
+
+// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it
+// can apply the provided private key
+// If unwrapOnly is set we will only try to decrypt the layer encryption key and return
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) {
+ if dc == nil {
+ return nil, "", errors.New("DecryptConfig must not be nil")
+ }
+ privOptsData, err := decryptLayerKeyOptsData(dc, desc)
+ if err != nil || unwrapOnly {
+ return nil, "", err
+ }
+
+ var pubOptsData []byte
+ pubOptsData, err = getLayerPubOpts(desc)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData)
+}
+
+func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) {
+ privKeyGiven := false
+ for annotationsID, scheme := range keyWrapperAnnotations {
+ b64Annotation := desc.Annotations[annotationsID]
+ if b64Annotation != "" {
+ keywrapper := GetKeyWrapper(scheme)
+
+ if len(keywrapper.GetPrivateKeys(dc.Parameters)) == 0 {
+ continue
+ }
+ privKeyGiven = true
+
+ optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation)
+ if err != nil {
+ // try next keywrap.KeyWrapper
+ continue
+ }
+ if optsData == nil {
+ // try next keywrap.KeyWrapper
+ continue
+ }
+ return optsData, nil
+ }
+ }
+ if !privKeyGiven {
+ return nil, errors.New("missing private key needed for decryption")
+ }
+ return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption")
+}
+
+func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) {
+ pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"]
+ if pubOptsString == "" {
+ return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{})
+ }
+ return base64.StdEncoding.DecodeString(pubOptsString)
+}
+
+// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function
+// of the given keywrapper with it and returns the result in case the Unwrap function
+// does not return an error. If all attempts fail, an error is returned.
+func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) {
+ if b64Annotations == "" {
+ return nil, nil
+ }
+ for _, b64Annotation := range strings.Split(b64Annotations, ",") {
+ annotation, err := base64.StdEncoding.DecodeString(b64Annotation)
+ if err != nil {
+ return nil, errors.New("could not base64 decode the annotation")
+ }
+ optsData, err := keywrapper.UnwrapKey(dc, annotation)
+ if err != nil {
+ continue
+ }
+ return optsData, nil
+ }
+ return nil, errors.New("no suitable key found for decrypting layer key")
+}
+
+// commonEncryptLayer is a function to encrypt the plain layer using a new random
+// symmetric key and return the LayerBlockCipherHandler's JSON in string form for
+// later use during decryption
+func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) {
+ lbch, err := blockcipher.NewLayerBlockCipherHandler()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) {
+ lbco, err := bcFin()
+ if err != nil {
+ return blockcipher.LayerBlockCipherOptions{}, err
+ }
+ lbco.Private.Digest = d
+ return lbco, nil
+ }
+
+ return encLayerReader, newBcFin, err
+}
+
+// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer
+// by passing along the optsData
+func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) {
+ privOpts := blockcipher.PrivateLayerBlockCipherOptions{}
+ err := json.Unmarshal(privOptsData, &privOpts)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData")
+ }
+
+ lbch, err := blockcipher.NewLayerBlockCipherHandler()
+ if err != nil {
+ return nil, "", err
+ }
+
+ pubOpts := blockcipher.PublicLayerBlockCipherOptions{}
+ if len(pubOptsData) > 0 {
+ err := json.Unmarshal(pubOptsData, &pubOpts)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData")
+ }
+ }
+
+ opts := blockcipher.LayerBlockCipherOptions{
+ Private: privOpts,
+ Public: pubOpts,
+ }
+
+ plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts)
+ if err != nil {
+ return nil, "", err
+ }
+
+ return plainLayerReader, opts.Private.Digest, nil
+}
+
+// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace'
+// and returns a map with those taken out
+func FilterOutAnnotations(annotations map[string]string) map[string]string {
+ a := make(map[string]string)
+ if len(annotations) > 0 {
+ for k, v := range annotations {
+ if strings.HasPrefix(k, "org.opencontainers.image.enc.") {
+ continue
+ }
+ a[k] = v
+ }
+ }
+ return a
+}
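A hedged sketch of the decryption side: the helper name is hypothetical, the descriptor is expected to carry the org.opencontainers.image.enc.* annotations from the manifest, and the unwrapOnly=true call is used as a cheap key check before streaming any data:

```go
package ocicryptexample // hypothetical example package

import (
	"io"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// decryptLayer decrypts one encrypted layer stream, given the layer descriptor
// carrying the encryption annotations and a PEM private key with its password.
func decryptLayer(privKey, password []byte, desc ocispec.Descriptor, encLayer io.Reader, out io.Writer) error {
	cc, err := config.DecryptWithPrivKeys([][]byte{privKey}, [][]byte{password})
	if err != nil {
		return err
	}

	// unwrapOnly=true only attempts to unwrap the layer key; the data stream is
	// not touched, so this works as a "do I have the right key?" pre-flight check.
	if _, _, err := ocicrypt.DecryptLayer(cc.DecryptConfig, nil, desc, true); err != nil {
		return err
	}

	plainReader, digest, err := ocicrypt.DecryptLayer(cc.DecryptConfig, encLayer, desc, false)
	if err != nil {
		return err
	}
	_ = digest // digest recorded in the private options at encryption time
	_, err = io.Copy(out, plainReader)
	return err
}
```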
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
new file mode 100644
index 000000000..214496e05
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -0,0 +1,18 @@
+module github.com/containers/ocicrypt
+
+go 1.12
+
+require (
+ github.com/containerd/containerd v1.2.10
+ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
+ github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/opencontainers/image-spec v1.0.1
+ github.com/pkg/errors v0.8.1
+ github.com/sirupsen/logrus v1.4.2 // indirect
+ github.com/stretchr/testify v1.3.0 // indirect
+ github.com/urfave/cli v1.22.1
+ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+ google.golang.org/grpc v1.24.0 // indirect
+ gopkg.in/square/go-jose.v2 v2.3.1
+ gotest.tools v2.2.0+incompatible // indirect
+)
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
new file mode 100644
index 000000000..d4c40e3ae
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -0,0 +1,73 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/containerd v1.2.10 h1:liQDhXqIn7y6cJ/7qBgOaZsiTZJc56/wkkhDBiDBRDw=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go
new file mode 100644
index 000000000..44cafae0c
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/gpg.go
@@ -0,0 +1,425 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+// GPGVersion enum representing the GPG client version to use.
+type GPGVersion int
+
+const (
+ // GPGv2 signifies gpgv2+
+ GPGv2 GPGVersion = iota
+ // GPGv1 signifies gpgv1+
+ GPGv1
+ // GPGVersionUndetermined signifies gpg client version undetermined
+ GPGVersionUndetermined
+)
+
+// GPGClient defines an interface for wrapping the gpg command line tools
+type GPGClient interface {
+ // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring
+ ReadGPGPubRingFile() ([]byte, error)
+ // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase
+ GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error)
+ // GetSecretKeyDetails gets the details of a secret key
+ GetSecretKeyDetails(keyid uint64) ([]byte, bool, error)
+ // GetKeyDetails gets the details of a public key
+ GetKeyDetails(keyid uint64) ([]byte, bool, error)
+ // ResolveRecipients resolves PGP key ids to user names
+ ResolveRecipients([]string) []string
+}
+
+// gpgClient contains generic gpg client information
+type gpgClient struct {
+ gpgHomeDir string
+}
+
+// gpgv2Client is a gpg2 client
+type gpgv2Client struct {
+ gpgClient
+}
+
+// gpgv1Client is a gpg client
+type gpgv1Client struct {
+ gpgClient
+}
+
+// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if it exists,
+// otherwise falls back to regular gpg.
+func GuessGPGVersion() GPGVersion {
+ if err := exec.Command("gpg2", "--version").Run(); err == nil {
+ return GPGv2
+ } else if err := exec.Command("gpg", "--version").Run(); err == nil {
+ return GPGv1
+ } else {
+ return GPGVersionUndetermined
+ }
+}
+
+// NewGPGClient creates a new GPGClient object representing the given version
+// and using the given home directory
+func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) {
+ v := new(GPGVersion)
+ switch gpgVersion {
+ case "v1":
+ *v = GPGv1
+ case "v2":
+ *v = GPGv2
+ default:
+ v = nil
+ }
+ return newGPGClient(v, gpgHomeDir)
+}
+
+func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) {
+ var gpgVersion GPGVersion
+ if version != nil {
+ gpgVersion = *version
+ } else {
+ gpgVersion = GuessGPGVersion()
+ }
+
+ switch gpgVersion {
+ case GPGv1:
+ return &gpgv1Client{
+ gpgClient: gpgClient{gpgHomeDir: homedir},
+ }, nil
+ case GPGv2:
+ return &gpgv2Client{
+ gpgClient: gpgClient{gpgHomeDir: homedir},
+ }, nil
+ case GPGVersionUndetermined:
+ return nil, fmt.Errorf("unable to determine GPG version")
+ default:
+ return nil, fmt.Errorf("unhandled case: NewGPGClient")
+ }
+}
+
+// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase
+func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
+ }
+
+ rfile, wfile, err := os.Pipe()
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not create pipe")
+ }
+ defer func() {
+ rfile.Close()
+ wfile.Close()
+ }()
+ // fill pipe in background
+ go func(passphrase string) {
+ _, _ = wfile.Write([]byte(passphrase))
+ wfile.Close()
+ }(passphrase)
+
+ args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...)
+
+ cmd := exec.Command("gpg2", args...)
+ cmd.ExtraFiles = []*os.File{rfile}
+
+ return runGPGGetOutput(cmd)
+}
+
+// ReadGPGPubRingFile reads the GPG public key ring file
+func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
+ }
+ args = append(args, []string{"--batch", "--export"}...)
+
+ cmd := exec.Command("gpg2", args...)
+
+ return runGPGGetOutput(cmd)
+}
+
+func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, "--homedir", gc.gpgHomeDir)
+ }
+ args = append(args, option, fmt.Sprintf("0x%x", keyid))
+
+ cmd := exec.Command("gpg2", args...)
+
+ keydata, err := runGPGGetOutput(cmd)
+ return keydata, err == nil, err
+}
+
+// GetSecretKeyDetails retrieves the secret key details of the key with keyid.
+// It returns a byte array of the details and a bool indicating whether the key exists.
+func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) {
+ return gc.getKeyDetails("-K", keyid)
+}
+
+// GetKeyDetails retrieves the public key details of the key with keyid.
+// It returns a byte array of the details and a bool indicating whether the key exists.
+func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
+ return gc.getKeyDetails("-k", keyid)
+}
+
+// ResolveRecipients converts PGP keyids to email addresses, if possible
+func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string {
+ return resolveRecipients(gc, recipients)
+}
+
+// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase
+func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
+ }
+ args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...)
+
+ cmd := exec.Command("gpg", args...)
+
+ return runGPGGetOutput(cmd)
+}
+
+// ReadGPGPubRingFile reads the GPG public key ring file
+func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
+ }
+ args = append(args, []string{"--batch", "--export"}...)
+
+ cmd := exec.Command("gpg", args...)
+
+ return runGPGGetOutput(cmd)
+}
+
+func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) {
+ var args []string
+
+ if gc.gpgHomeDir != "" {
+ args = append(args, "--homedir", gc.gpgHomeDir)
+ }
+ args = append(args, option, fmt.Sprintf("0x%x", keyid))
+
+ cmd := exec.Command("gpg", args...)
+
+ keydata, err := runGPGGetOutput(cmd)
+
+ return keydata, err == nil, err
+}
+
+// GetSecretKeyDetails retrieves the secret key details of the key with keyid.
+// It returns a byte array of the details and a bool indicating whether the key exists.
+func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) {
+ return gc.getKeyDetails("-K", keyid)
+}
+
+// GetKeyDetails retrieves the public key details of the key with keyid.
+// It returns a byte array of the details and a bool indicating whether the key exists.
+func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
+ return gc.getKeyDetails("-k", keyid)
+}
+
+// ResolveRecipients converts PGP keyids to email addresses, if possible
+func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string {
+ return resolveRecipients(gc, recipients)
+}
+
+// runGPGGetOutput runs the GPG commandline and returns stdout as byte array
+// and any stderr in the error
+func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ stdoutstr, err2 := ioutil.ReadAll(stdout)
+ stderrstr, _ := ioutil.ReadAll(stderr)
+
+ if err := cmd.Wait(); err != nil {
+ return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
+ }
+
+ return stdoutstr, err2
+}
+
+// resolveRecipients walks the list of recipients and attempts to convert
+// all keyIds to email addresses; if something goes wrong during the
+// conversion of a recipient, the original string is returned for that
+// recipient
+func resolveRecipients(gc GPGClient, recipients []string) []string {
+ var result []string
+
+ for _, recipient := range recipients {
+ keyID, err := strconv.ParseUint(recipient, 0, 64)
+ if err != nil {
+ result = append(result, recipient)
+ } else {
+ details, found, _ := gc.GetKeyDetails(keyID)
+ if !found {
+ result = append(result, recipient)
+ } else {
+ email := extractEmailFromDetails(details)
+ if email == "" {
+ result = append(result, recipient)
+ } else {
+ result = append(result, email)
+ }
+ }
+ }
+ }
+ return result
+}
+
+var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
+
+func extractEmailFromDetails(details []byte) string {
+ loc := emailPattern.FindSubmatchIndex(details)
+ if len(loc) == 0 {
+ return ""
+ }
+ return string(emailPattern.Expand(nil, []byte("$email"), details, loc))
+}
+
+// uint64ToStringArray converts an array of uint64's to an array of strings
+// by applying a format string to each uint64
+func uint64ToStringArray(format string, in []uint64) []string {
+ var ret []string
+
+ for _, v := range in {
+ ret = append(ret, fmt.Sprintf(format, v))
+ }
+ return ret
+}
+
+// GPGGetPrivateKey walks the list of layer descriptors and tries to decrypt the
+// wrapped symmetric keys. For this it determines whether a private key is
+// in the GPGVault or on this system and prompts for the passwords for those
+// that are available. If we do not find a private key on the system for
+// getting to the symmetric key of a layer then an error is generated.
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) {
+ // PrivateKeyData describes a private key
+ type PrivateKeyData struct {
+ KeyData []byte
+ KeyDataPassword []byte
+ }
+ var pkd PrivateKeyData
+ keyIDPasswordMap := make(map[uint64]PrivateKeyData)
+
+ for _, desc := range descs {
+ for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) {
+ if scheme != "pgp" {
+ continue
+ }
+ keywrapper := GetKeyWrapper(scheme)
+ if keywrapper == nil {
+ return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme)
+ }
+ keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ found := false
+ for _, keyid := range keyIds {
+ // do we have this key? -- first check the vault
+ if gpgVault != nil {
+ _, keydata := gpgVault.GetGPGPrivateKey(keyid)
+ if len(keydata) > 0 {
+ pkd = PrivateKeyData{
+ KeyData: keydata,
+ KeyDataPassword: nil, // password not supported in this case
+ }
+ keyIDPasswordMap[keyid] = pkd
+ found = true
+ break
+ }
+ } else if gpgClient != nil {
+ // check the local system's gpg installation
+ keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid)
+ // this may fail if the key is not here; we ignore the error
+ if !haveKey {
+ // key not on this system
+ continue
+ }
+
+ _, found = keyIDPasswordMap[keyid]
+ if !found {
+ fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo))
+ fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid)
+
+ password, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+ fmt.Printf("\n")
+ if err != nil {
+ return nil, nil, err
+ }
+ keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password))
+ if err != nil {
+ return nil, nil, err
+ }
+ pkd = PrivateKeyData{
+ KeyData: keydata,
+ KeyDataPassword: password,
+ }
+ keyIDPasswordMap[keyid] = pkd
+ found = true
+ }
+ break
+ } else {
+ return nil, nil, errors.New("no GPGVault or GPGClient passed")
+ }
+ }
+ if !found && len(b64pgpPackets) > 0 && mustFindKey {
+ ids := uint64ToStringArray("0x%x", keyIds)
+
+ return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", "))
+ }
+ }
+ }
+
+ for _, pkd := range keyIDPasswordMap {
+ gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData)
+ gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword)
+ }
+
+ return gpgPrivKeys, gpgPrivKeysPwds, nil
+}
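A small sketch of the GPG client wrapper; passing empty strings lets the library guess the gpg version and use the default home directory:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/ocicrypt"
)

func main() {
	// "" for the version lets the library guess between gpg2 and gpg; a home
	// directory can be passed to use a keyring other than the default one.
	client, err := ocicrypt.NewGPGClient("", "")
	if err != nil {
		log.Fatal(err)
	}

	pubRing, err := client.ReadGPGPubRingFile()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("public keyring is %d bytes\n", len(pubRing))

	// Key IDs that cannot be resolved are passed through unchanged.
	fmt.Println(client.ResolveRecipients([]string{"0x1234567890abcdef", "alice@example.com"}))
}
```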
diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go
new file mode 100644
index 000000000..dd9a10007
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/gpgvault.go
@@ -0,0 +1,100 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+ "bytes"
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// GPGVault defines an interface for wrapping multiple secret key rings
+type GPGVault interface {
+ // AddSecretKeyRingData adds a secret keyring via its raw byte array
+ AddSecretKeyRingData(gpgSecretKeyRingData []byte) error
+ // AddSecretKeyRingDataArray adds secret keyring via its raw byte arrays
+ AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error
+ // AddSecretKeyRingFiles adds secret keyrings given their filenames
+ AddSecretKeyRingFiles(filenames []string) error
+ // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase
+ GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte)
+}
+
+// gpgVault wraps an array of gpgSecretKeyRing
+type gpgVault struct {
+ entityLists []openpgp.EntityList
+ keyDataList [][]byte // the raw data original passed in
+}
+
+// NewGPGVault creates an empty GPGVault
+func NewGPGVault() GPGVault {
+ return &gpgVault{}
+}
+
+// AddSecretKeyRingData adds a secret keyring to the gpgVault; the raw byte
+// array read from the file must be passed and will be parsed by this function
+func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error {
+ // read the private keys
+ r := bytes.NewReader(gpgSecretKeyRingData)
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return errors.Wrapf(err, "could not read keyring")
+ }
+ g.entityLists = append(g.entityLists, entityList)
+ g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData)
+ return nil
+}
+
+// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte
+// arrays read from files must be passed
+func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error {
+ for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray {
+ if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddSecretKeyRingFiles adds the secret key rings given their filenames
+func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
+ for _, filename := range filenames {
+ gpgSecretKeyRingData, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return err
+ }
+ err = g.AddSecretKeyRingData(gpgSecretKeyRingData)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetGPGPrivateKey returns the decryption keys for the specified keyid and the raw keyring data they were parsed from
+func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) {
+ for i, el := range g.entityLists {
+ decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications)
+ if len(decKeys) > 0 {
+ return decKeys, g.keyDataList[i]
+ }
+ }
+ return nil, nil
+}
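
For orientation, a minimal sketch of how a caller might exercise the GPGVault added above; the keyring path and key ID are placeholders, and the import path assumes the package is consumed as github.com/containers/ocicrypt.

package main

import (
	"fmt"
	"log"

	"github.com/containers/ocicrypt"
)

func main() {
	vault := ocicrypt.NewGPGVault()
	// Placeholder keyring file; any exported GPG secret keyring works here.
	if err := vault.AddSecretKeyRingFiles([]string{"/tmp/secring.gpg"}); err != nil {
		log.Fatal(err)
	}
	// Returns the matching decryption keys and the raw keyring blob they came from.
	keys, keyringData := vault.GetGPGPrivateKey(0x1234567890abcdef)
	fmt.Printf("matched %d key(s) in a %d byte keyring\n", len(keys), len(keyringData))
}
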
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
new file mode 100644
index 000000000..5d1dde241
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
@@ -0,0 +1,132 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package jwe
+
+import (
+ "crypto/ecdsa"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/utils"
+ "github.com/pkg/errors"
+ jose "gopkg.in/square/go-jose.v2"
+)
+
+type jweKeyWrapper struct {
+}
+
+func (kw *jweKeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.jwe"
+}
+
+// NewKeyWrapper returns a new key wrapping interface using jwe
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &jweKeyWrapper{}
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ var joseRecipients []jose.Recipient
+
+ err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"])
+ if err != nil {
+ return nil, err
+ }
+ // no recipients is not an error...
+ if len(joseRecipients) == 0 {
+ return nil, nil
+ }
+
+ encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed")
+ }
+ jwe, err := encrypter.Encrypt(optsData)
+ if err != nil {
+ return nil, errors.Wrapf(err, "JWE Encrypt failed")
+ }
+ return []byte(jwe.FullSerialize()), nil
+}
+
+func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
+ jwe, err := jose.ParseEncrypted(string(jweString))
+ if err != nil {
+ return nil, errors.New("jose.ParseEncrypted failed")
+ }
+
+ privKeys := kw.GetPrivateKeys(dc.Parameters)
+ if len(privKeys) == 0 {
+ return nil, errors.New("No private keys found for JWE decryption")
+ }
+ privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
+ if len(privKeysPasswords) != len(privKeys) {
+ return nil, errors.New("Private key password array length must be same as that of private keys")
+ }
+
+ for idx, privKey := range privKeys {
+ key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE")
+ if err != nil {
+ return nil, err
+ }
+ _, _, plain, err := jwe.DecryptMulti(key)
+ if err == nil {
+ return plain, nil
+ }
+ }
+ return nil, errors.New("JWE: No suitable private key found for decryption")
+}
+
+func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys"]
+}
+
+func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys-passwords"]
+}
+
+func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) {
+ return nil, nil
+}
+
+func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) {
+ return []string{"[jwe]"}, nil
+}
+
+func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error {
+ if len(pubKeys) == 0 {
+ return nil
+ }
+ for _, pubKey := range pubKeys {
+ key, err := utils.ParsePublicKey(pubKey, "JWE")
+ if err != nil {
+ return err
+ }
+
+ alg := jose.RSA_OAEP
+ switch key.(type) {
+ case *ecdsa.PublicKey:
+ alg = jose.ECDH_ES_A256KW
+ }
+
+ *joseRecipients = append(*joseRecipients, jose.Recipient{
+ Algorithm: alg,
+ Key: key,
+ })
+ }
+ return nil
+}
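
A rough sketch of driving the JWE key wrapper above; it generates a throwaway PEM key pair with the test helpers from ocicrypt/utils (vendored in this same commit), and the EncryptConfig/DecryptConfig literals only assume the exported Parameters map that the wrapper itself reads.

package main

import (
	"log"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/jwe"
	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Throwaway PEM key pair; the private key is protected with an arbitrary password.
	password := []byte("test")
	pubPEM, privPEM, err := utils.CreateRSATestKey(2048, password, true)
	if err != nil {
		log.Fatal(err)
	}

	kw := jwe.NewKeyWrapper()

	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{"pubkeys": {pubPEM}},
	}
	wrapped, err := kw.WrapKeys(ec, []byte("opaque layer-key options"))
	if err != nil {
		log.Fatal(err)
	}

	dc := &config.DecryptConfig{
		Parameters: map[string][][]byte{
			"privkeys":           {privPEM},
			"privkeys-passwords": {password},
		},
	}
	optsData, err := kw.UnwrapKey(dc, wrapped)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("recovered %d bytes of layer-key options", len(optsData))
}
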
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go
new file mode 100644
index 000000000..75fdf6886
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package keywrap
+
+import (
+ "github.com/containers/ocicrypt/config"
+)
+
+// KeyWrapper is the interface used for wrapping keys using
+// a specific encryption technology (pgp, jwe)
+type KeyWrapper interface {
+ WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error)
+ UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error)
+ GetAnnotationID() string
+ // GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation
+ // since in some key services a private key may not be exportable (e.g. an HSM)
+ GetPrivateKeys(dcparameters map[string][][]byte) [][]byte
+
+ // GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption
+ // schemes may not have a notion of key IDs
+ GetKeyIdsFromPacket(packet string) ([]uint64, error)
+
+ // GetRecipients (optional) gets a list of recipients. It is optional since not every
+ // encryption scheme has a meaningful notion of recipients
+ GetRecipients(packet string) ([]string, error)
+}
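
The three implementations vendored in this commit (jwe, pgp, pkcs7) all satisfy this interface; below is a small sketch of dispatching on GetAnnotationID. It only illustrates the contract and is not the registry ocicrypt itself maintains.

package main

import (
	"fmt"

	"github.com/containers/ocicrypt/keywrap"
	"github.com/containers/ocicrypt/keywrap/jwe"
	"github.com/containers/ocicrypt/keywrap/pgp"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
)

func main() {
	// Index the wrappers by the layer annotation each one handles.
	wrappers := map[string]keywrap.KeyWrapper{}
	for _, kw := range []keywrap.KeyWrapper{
		jwe.NewKeyWrapper(),
		pgp.NewKeyWrapper(),
		pkcs7.NewKeyWrapper(),
	} {
		wrappers[kw.GetAnnotationID()] = kw
	}
	for id := range wrappers {
		fmt.Println("key wrapper registered for", id)
	}
}
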
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
new file mode 100644
index 000000000..ff70c2d65
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
@@ -0,0 +1,269 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package pgp
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/mail"
+ "strconv"
+ "strings"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+type gpgKeyWrapper struct {
+}
+
+// NewKeyWrapper returns a new key wrapping interface for pgp
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &gpgKeyWrapper{}
+}
+
+var (
+ // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption
+ GPGDefaultEncryptConfig = &packet.Config{
+ Rand: rand.Reader,
+ DefaultHash: crypto.SHA256,
+ DefaultCipher: packet.CipherAES256,
+ CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression
+ RSABits: 2048,
+ }
+)
+
+func (kw *gpgKeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.pgp"
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ ciphertext := new(bytes.Buffer)
+ el, err := kw.createEntityList(ec)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to create entity list")
+ }
+ if len(el) == 0 {
+ // nothing to do -- not an error
+ return nil, nil
+ }
+
+ plaintextWriter, err := openpgp.Encrypt(ciphertext,
+ el, /*EntityList*/
+ nil, /* Sign*/
+ nil, /* FileHint */
+ GPGDefaultEncryptConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = plaintextWriter.Write(optsData); err != nil {
+ return nil, err
+ } else if err = plaintextWriter.Close(); err != nil {
+ return nil, err
+ }
+ return ciphertext.Bytes(), err
+}
+
+// UnwrapKey unwraps the symmetric key with which the layer is encrypted
+// This symmetric key is encrypted in the PGP payload.
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) {
+ pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters)
+ if err != nil {
+ return nil, err
+ }
+
+ for idx, pgpPrivateKey := range pgpPrivateKeys {
+ r := bytes.NewBuffer(pgpPrivateKey)
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to parse private keys")
+ }
+
+ var prompt openpgp.PromptFunction
+ if len(pgpPrivateKeysPwd) > idx {
+ responded := false
+ prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
+ if responded {
+ return nil, fmt.Errorf("don't seem to have the right password")
+ }
+ responded = true
+ for _, key := range keys {
+ if key.PrivateKey != nil {
+ _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx])
+ }
+ }
+ return pgpPrivateKeysPwd[idx], nil
+ }
+ }
+
+ r = bytes.NewBuffer(pgpPacket)
+ md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig)
+ if err != nil {
+ continue
+ }
+ // we get the plain key options back
+ optsData, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ continue
+ }
+ return optsData, nil
+ }
+ return nil, errors.New("PGP: No suitable key found to unwrap key")
+}
+
+// GetKeyIdsFromPacket converts the base64 encoded PGP packets to uint64 keyIds
+func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) {
+
+ var keyids []uint64
+ for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") {
+ pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not decode base64 encoded PGP packet")
+ }
+ newids, err := kw.getKeyIDs(pgpPacket)
+ if err != nil {
+ return nil, err
+ }
+ keyids = append(keyids, newids...)
+ }
+ return keyids, nil
+}
+
+// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs
+func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) {
+ var keyids []uint64
+
+ kbuf := bytes.NewBuffer(pgpPacket)
+ packets := packet.NewReader(kbuf)
+ParsePackets:
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break ParsePackets
+ }
+ if err != nil {
+ return []uint64{}, errors.Wrapf(err, "packets.Next() failed")
+ }
+ switch p := p.(type) {
+ case *packet.EncryptedKey:
+ keyids = append(keyids, p.KeyId)
+ case *packet.SymmetricallyEncrypted:
+ break ParsePackets
+ }
+ }
+ return keyids, nil
+}
+
+// GetRecipients converts the base64 encoded PGP packets to an array of recipient key IDs (as hex strings)
+func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) {
+ keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets)
+ if err != nil {
+ return nil, err
+ }
+ var array []string
+ for _, keyid := range keyIds {
+ array = append(array, "0x"+strconv.FormatUint(keyid, 16))
+ }
+ return array, nil
+}
+
+func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["gpg-privatekeys"]
+}
+
+func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) {
+
+ privKeys := kw.GetPrivateKeys(dcparameters)
+ if len(privKeys) == 0 {
+ return nil, nil, errors.New("GPG: Missing private key parameter")
+ }
+
+ return privKeys, dcparameters["gpg-privatekeys-passwords"], nil
+}
+
+// createEntityList creates the openpgp EntityList by reading the KeyRing
+// first and then filtering out recipients' keys
+func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) {
+ pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"]
+ if len(pgpPubringFile) == 0 {
+ return nil, nil
+ }
+ r := bytes.NewReader(pgpPubringFile[0])
+
+ entityList, err := openpgp.ReadKeyRing(r)
+ if err != nil {
+ return nil, err
+ }
+
+ gpgRecipients := ec.Parameters["gpg-recipients"]
+ if len(gpgRecipients) == 0 {
+ return nil, nil
+ }
+
+ rSet := make(map[string]int)
+ for _, r := range gpgRecipients {
+ rSet[string(r)] = 0
+ }
+
+ var filteredList openpgp.EntityList
+ for _, entity := range entityList {
+ for k := range entity.Identities {
+ addr, err := mail.ParseAddress(k)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range gpgRecipients {
+ recp := string(r)
+ if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 {
+ filteredList = append(filteredList, entity)
+ rSet[recp] = rSet[recp] + 1
+ }
+ }
+ }
+ }
+
+ // make sure we found keys for all the Recipients...
+ var buffer bytes.Buffer
+ notFound := false
+ buffer.WriteString("PGP: No key found for the following recipients: ")
+
+ for k, v := range rSet {
+ if v == 0 {
+ if notFound {
+ buffer.WriteString(", ")
+ }
+ buffer.WriteString(k)
+ notFound = true
+ }
+ }
+
+ if notFound {
+ return nil, errors.New(buffer.String())
+ }
+
+ return filteredList, nil
+}
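
A sketch of wrapping a layer key with the PGP wrapper above; the keyring path and recipient address are placeholders, and the parameter keys ("gpg-pubkeyringfile", "gpg-recipients") are the ones this file reads.

package main

import (
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pgp"
)

func main() {
	// Placeholder public keyring; export one with `gpg --export > pubring.gpg`.
	pubring, err := ioutil.ReadFile("/tmp/pubring.gpg")
	if err != nil {
		log.Fatal(err)
	}

	kw := pgp.NewKeyWrapper()
	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{
			"gpg-pubkeyringfile": {pubring},
			"gpg-recipients":     {[]byte("testkey@example.com")},
		},
	}
	wrapped, err := kw.WrapKeys(ec, []byte("opaque layer-key options"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrapped PGP packet: %d bytes", len(wrapped))
}
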
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
new file mode 100644
index 000000000..2762b9777
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
@@ -0,0 +1,132 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package pkcs7
+
+import (
+ "crypto"
+ "crypto/x509"
+
+ "github.com/containers/ocicrypt/config"
+ "github.com/containers/ocicrypt/keywrap"
+ "github.com/containers/ocicrypt/utils"
+ "github.com/fullsailor/pkcs7"
+ "github.com/pkg/errors"
+)
+
+type pkcs7KeyWrapper struct {
+}
+
+// NewKeyWrapper returns a new key wrapping interface using pkcs7
+func NewKeyWrapper() keywrap.KeyWrapper {
+ return &pkcs7KeyWrapper{}
+}
+
+func (kw *pkcs7KeyWrapper) GetAnnotationID() string {
+ return "org.opencontainers.image.enc.keys.pkcs7"
+}
+
+// WrapKeys wraps the session key for recipients and encrypts the optsData, which
+// describes the symmetric key used for encrypting the layer
+func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
+ x509Certs, err := collectX509s(ec.Parameters["x509s"])
+ if err != nil {
+ return nil, err
+ }
+ // no recipients is not an error...
+ if len(x509Certs) == 0 {
+ return nil, nil
+ }
+
+ pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
+ return pkcs7.Encrypt(optsData, x509Certs)
+}
+
+func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) {
+ if len(x509s) == 0 {
+ return nil, nil
+ }
+ var x509Certs []*x509.Certificate
+ for _, x509 := range x509s {
+ x509Cert, err := utils.ParseCertificate(x509, "PKCS7")
+ if err != nil {
+ return nil, err
+ }
+ x509Certs = append(x509Certs, x509Cert)
+ }
+ return x509Certs, nil
+}
+
+func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys"]
+}
+
+func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
+ return dcparameters["privkeys-passwords"]
+}
+
+// UnwrapKey unwraps the symmetric key with which the layer is encrypted
+// This symmetric key is encrypted in the PKCS7 payload.
+func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) {
+ privKeys := kw.GetPrivateKeys(dc.Parameters)
+ if len(privKeys) == 0 {
+ return nil, errors.New("no private keys found for PKCS7 decryption")
+ }
+ privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
+ if len(privKeysPasswords) != len(privKeys) {
+ return nil, errors.New("private key password array length must be same as that of private keys")
+ }
+
+ x509Certs, err := collectX509s(dc.Parameters["x509s"])
+ if err != nil {
+ return nil, err
+ }
+ if len(x509Certs) == 0 {
+ return nil, errors.New("no x509 certificates found needed for PKCS7 decryption")
+ }
+
+ p7, err := pkcs7.Parse(pkcs7Packet)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not parse PKCS7 packet")
+ }
+
+ for idx, privKey := range privKeys {
+ key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7")
+ if err != nil {
+ return nil, err
+ }
+ for _, x509Cert := range x509Certs {
+ optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key))
+ if err != nil {
+ continue
+ }
+ return optsData, nil
+ }
+ }
+ return nil, errors.New("PKCS7: No suitable private key found for decryption")
+}
+
+// GetKeyIdsFromPacket converts the base64 encoded packet to uint64 keyIds;
+// We cannot do this with pkcs7
+func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) {
+ return nil, nil
+}
+
+// GetRecipients converts the wrappedKeys to an array of recipients
+// We cannot do this with pkcs7
+func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) {
+ return []string{"[pkcs7]"}, nil
+}
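
A sketch of round-tripping a layer key through the PKCS7 wrapper above. It builds a throwaway recipient with the test helpers from ocicrypt/utils (vendored in this same commit); the parameter keys mirror the ones this file reads, and the config struct literals only assume the exported Parameters map used above.

package main

import (
	"log"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Throwaway recipient: a DER key pair certified by a test CA.
	pubDER, privDER, err := utils.CreateRSATestKey(2048, nil, false)
	if err != nil {
		log.Fatal(err)
	}
	caKey, caCert, err := utils.CreateTestCA()
	if err != nil {
		log.Fatal(err)
	}
	recipient, err := utils.CertifyKey(pubDER, nil, caKey, caCert)
	if err != nil {
		log.Fatal(err)
	}

	kw := pkcs7.NewKeyWrapper()
	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{"x509s": {recipient.Raw}},
	}
	wrapped, err := kw.WrapKeys(ec, []byte("opaque layer-key options"))
	if err != nil {
		log.Fatal(err)
	}

	dc := &config.DecryptConfig{
		Parameters: map[string][][]byte{
			"x509s":              {recipient.Raw},
			"privkeys":           {privDER},
			"privkeys-passwords": {nil}, // one (empty) password per private key
		},
	}
	if _, err := kw.UnwrapKey(dc, wrapped); err != nil {
		log.Fatal(err)
	}
	log.Println("PKCS7 round trip succeeded")
}
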
diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go
new file mode 100644
index 000000000..a93eec8e9
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/reader.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+ "io"
+)
+
+type readerAtReader struct {
+ r io.ReaderAt
+ off int64
+}
+
+// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader
+func ReaderFromReaderAt(r io.ReaderAt) io.Reader {
+ return &readerAtReader{
+ r: r,
+ off: 0,
+ }
+}
+
+func (rar *readerAtReader) Read(p []byte) (n int, err error) {
+ n, err = rar.r.ReadAt(p, rar.off)
+ rar.off += int64(n)
+ return n, err
+}
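
A short sketch of the helper above: anything implementing io.ReaderAt (a strings.Reader here, more typically an *os.File or a layer blob) can be consumed through the plain io.Reader interface.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/containers/ocicrypt"
)

func main() {
	// strings.Reader implements io.ReaderAt and stands in for a file or blob.
	ra := strings.NewReader("encrypted layer bytes")
	r := ocicrypt.ReaderFromReaderAt(ra)
	data, err := ioutil.ReadAll(r)
	fmt.Println(len(data), err)
}
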
diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go
new file mode 100644
index 000000000..330069d49
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/spec/spec.go
@@ -0,0 +1,12 @@
+package spec
+
+const (
+ // MediaTypeLayerEnc is the MIME type used for encrypted layers.
+ MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
+ // MediaTypeLayerGzipEnc is the MIME type used for encrypted compressed layers.
+ MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
+ // MediaTypeLayerNonDistributableEnc is the MIME type used for non-distributable encrypted layers.
+ MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
+ // MediaTypeLayerNonDistributableGzipEnc is the MIME type used for non-distributable encrypted compressed layers.
+ MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+)
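
These constants pair up with the unencrypted OCI layer media types; a purely illustrative mapping follows, with the plain media type strings written out literally rather than imported from the OCI image-spec package.

package main

import (
	"fmt"

	"github.com/containers/ocicrypt/spec"
)

func main() {
	// Plain layer media type -> its encrypted counterpart.
	encMediaType := map[string]string{
		"application/vnd.oci.image.layer.v1.tar":      spec.MediaTypeLayerEnc,
		"application/vnd.oci.image.layer.v1.tar+gzip": spec.MediaTypeLayerGzipEnc,
	}
	fmt.Println(encMediaType["application/vnd.oci.image.layer.v1.tar+gzip"])
}
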
diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
new file mode 100644
index 000000000..3b939bdea
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "io"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// DelayedReader wraps an io.Reader and allows a client to use the Reader
+// interface. The DelayedReader holds back part of the data in an internal
+// buffer so that an error which occurs on the wrapped Reader can be reported
+// to the client early, while some data is still being held back.
+type DelayedReader struct {
+ reader io.Reader // Reader to Read() bytes from and delay them
+ err error // error that occurred on the reader
+ buffer []byte // delay buffer
+ bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller
+ bufoff int // offset in the delay buffer to give to Read()
+}
+
+// NewDelayedReader wraps an io.Reader and allocates a delay buffer of bufsize bytes
+func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader {
+ return &DelayedReader{
+ reader: reader,
+ buffer: make([]byte, bufsize),
+ }
+}
+
+// Read implements the io.Reader interface
+func (dr *DelayedReader) Read(p []byte) (int, error) {
+ if dr.err != nil && dr.err != io.EOF {
+ return 0, dr.err
+ }
+
+ // if we are completely drained, return io.EOF
+ if dr.err == io.EOF && dr.bufbytes == 0 {
+ return 0, io.EOF
+ }
+
+ // only at the beginning we fill our delay buffer in an extra step
+ if dr.bufbytes < len(dr.buffer) && dr.err == nil {
+ dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer)
+ if dr.err != nil && dr.err != io.EOF {
+ return 0, dr.err
+ }
+ }
+ // dr.err != nil means we have EOF and can drain the delay buffer
+ // otherwise we need to still read from the reader
+
+ var tmpbuf []byte
+ tmpbufbytes := 0
+ if dr.err == nil {
+ tmpbuf = make([]byte, len(p))
+ tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf)
+ if dr.err != nil && dr.err != io.EOF {
+ return 0, dr.err
+ }
+ }
+
+ // copy out of the delay buffer into 'p'
+ tocopy1 := min(len(p), dr.bufbytes)
+ c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:])
+ dr.bufoff += c1
+ dr.bufbytes -= c1
+
+ c2 := 0
+ // can p still hold more data?
+ if c1 < len(p) {
+ // copy out of the tmpbuf into 'p'
+ c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes])
+ }
+
+ // if tmpbuf holds data we need to hold onto, copy them
+ // into the delay buffer
+ if tmpbufbytes-c2 > 0 {
+ // left-shift the delay buffer and append the tmpbuf's remaining data
+ dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes]
+ dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...)
+ dr.bufoff = 0
+ dr.bufbytes = len(dr.buffer)
+ }
+
+ var err error
+ if dr.bufbytes == 0 {
+ err = io.EOF
+ }
+ return c1 + c2, err
+}
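
A small sketch of the DelayedReader above: it behaves like the wrapped reader, except that up to bufsize bytes are withheld so a late error can surface before all data has been handed out.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	src := strings.NewReader("some stream of layer data")
	// Hold back up to 1 KiB of the stream in the delay buffer.
	dr := utils.NewDelayedReader(src, 1024)
	out, err := ioutil.ReadAll(dr)
	fmt.Println(len(out), err)
}
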
diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
new file mode 100644
index 000000000..c360e0a33
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go
@@ -0,0 +1,31 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "io"
+)
+
+// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns
+// io.EOF if an EOF was encountered, or any other error from the reader.
+func FillBuffer(reader io.Reader, buffer []byte) (int, error) {
+ n, err := io.ReadFull(reader, buffer)
+ if err == io.ErrUnexpectedEOF {
+ return n, io.EOF
+ }
+ return n, err
+}
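
FillBuffer is a thin wrapper over io.ReadFull that normalizes the short-read case; a quick sketch of the behaviour:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	buf := make([]byte, 8)
	// The source is shorter than the buffer, so FillBuffer reports io.EOF
	// (rather than io.ErrUnexpectedEOF) together with the bytes it did read.
	n, err := utils.FillBuffer(strings.NewReader("abc"), buf)
	fmt.Println(n, err) // 3 EOF
}
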
diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go
new file mode 100644
index 000000000..e2ed4b1d8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/testing.go
@@ -0,0 +1,166 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "math/big"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// CreateRSAKey creates an RSA key
+func CreateRSAKey(bits int) (*rsa.PrivateKey, error) {
+ key, err := rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, errors.Wrap(err, "rsa.GenerateKey failed")
+ }
+ return key, nil
+}
+
+// CreateRSATestKey creates an RSA key of the given size and returns
+// the public and private key in PEM or DER format
+func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) {
+ key, err := CreateRSAKey(bits)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed")
+ }
+ privData := x509.MarshalPKCS1PrivateKey(key)
+
+ // no more encoding needed for DER
+ if !pemencode {
+ return pubData, privData, nil
+ }
+
+ publicKey := pem.EncodeToMemory(&pem.Block{
+ Type: "PUBLIC KEY",
+ Bytes: pubData,
+ })
+
+ var block *pem.Block
+
+ typ := "RSA PRIVATE KEY"
+ if len(password) > 0 {
+ block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed")
+ }
+ } else {
+ block = &pem.Block{
+ Type: typ,
+ Bytes: privData,
+ }
+ }
+
+ privateKey := pem.EncodeToMemory(block)
+
+ return publicKey, privateKey, nil
+}
+
+// CreateECDSATestKey creates an elliptic curve key for the given curve and returns
+// the public and private key in DER format
+func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
+ key, err := ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed")
+ }
+
+ pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed")
+ }
+
+ privData, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed")
+ }
+
+ return pubData, privData, nil
+}
+
+// CreateTestCA creates a root CA for testing
+func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed")
+ }
+
+ ca := &x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{
+ CommonName: "test-ca",
+ },
+ NotBefore: time.Now(),
+ NotAfter: time.Now().AddDate(1, 0, 0),
+ IsCA: true,
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ }
+ caCert, err := certifyKey(&key.PublicKey, ca, key, ca)
+
+ return key, caCert, err
+}
+
+// CertifyKey certifies a public key using the given CA's private key and cert;
+// The certificate template for the public key is optional
+func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+ pubKey, err := ParsePublicKey(pubbytes, "CertifyKey")
+ if err != nil {
+ return nil, err
+ }
+ return certifyKey(pubKey, template, caKey, caCert)
+}
+
+func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+ if template == nil {
+ template = &x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{
+ CommonName: "testkey",
+ },
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour),
+ IsCA: false,
+ KeyUsage: x509.KeyUsageDigitalSignature,
+ BasicConstraintsValid: true,
+ }
+ }
+
+ certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
+ if err != nil {
+ return nil, errors.Wrap(err, "x509.CreateCertificate failed")
+ }
+
+ cert, err := x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, errors.Wrap(err, "x509.ParseCertificate failed")
+ }
+
+ return cert, nil
+}
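
A sketch that chains the test helpers above: generate a PEM RSA key pair, spin up a throwaway CA, and certify the public key. The password and key size are arbitrary test values.

package main

import (
	"log"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// PEM-encoded key pair; the private key is encrypted with the test password.
	pubPEM, _, err := utils.CreateRSATestKey(2048, []byte("test"), true)
	if err != nil {
		log.Fatal(err)
	}
	caKey, caCert, err := utils.CreateTestCA()
	if err != nil {
		log.Fatal(err)
	}
	// With a nil template, a built-in "testkey" certificate template is used.
	cert, err := utils.CertifyKey(pubPEM, nil, caKey, caCert)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("issued test certificate for", cert.Subject.CommonName)
}
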
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
new file mode 100644
index 000000000..14eea38c1
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -0,0 +1,220 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/openpgp"
+ json "gopkg.in/square/go-jose.v2"
+)
+
+// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
+func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
+ jwk := json.JSONWebKey{}
+ err := jwk.UnmarshalJSON(privKey)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+ }
+ if jwk.IsPublic() {
+ return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
+ }
+ return &jwk, nil
+}
+
+// parseJWKPublicKey parses the input byte array as a JWK
+func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
+ jwk := json.JSONWebKey{}
+ err := jwk.UnmarshalJSON(privKey)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
+ }
+ if !jwk.IsPublic() {
+ return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
+ }
+ return &jwk, nil
+}
+
+// IsPasswordError checks whether an error is related to a missing or wrong
+// password
+func IsPasswordError(err error) bool {
+ if err == nil {
+ return false
+ }
+ msg := strings.ToLower(err.Error())
+
+ return strings.Contains(msg, "password") &&
+ (strings.Contains(msg, "missing") || strings.Contains(msg, "wrong"))
+}
+
+// ParsePrivateKey tries to parse a private key in DER format first, then in
+// PEM format, and finally as a JWK, returning an error if all parsing failed
+func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) {
+ key, err := x509.ParsePKCS8PrivateKey(privKey)
+ if err != nil {
+ key, err = x509.ParsePKCS1PrivateKey(privKey)
+ if err != nil {
+ key, err = x509.ParseECPrivateKey(privKey)
+ }
+ }
+ if err != nil {
+ block, _ := pem.Decode(privKey)
+ if block != nil {
+ var der []byte
+ if x509.IsEncryptedPEMBlock(block) {
+ if privKeyPassword == nil {
+ return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix)
+ }
+ der, err = x509.DecryptPEMBlock(block, privKeyPassword)
+ if err != nil {
+ return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix)
+ }
+ } else {
+ der = block.Bytes
+ }
+
+ key, err = x509.ParsePKCS8PrivateKey(der)
+ if err != nil {
+ key, err = x509.ParsePKCS1PrivateKey(der)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix)
+ }
+ }
+ } else {
+ key, err = parseJWKPrivateKey(privKey, prefix)
+ }
+ }
+ return key, err
+}
+
+// IsPrivateKey returns true in case the given byte array represents a private key
+// It returns an error if for example the password is wrong
+func IsPrivateKey(data []byte, password []byte) (bool, error) {
+ _, err := ParsePrivateKey(data, password, "")
+ return err == nil, err
+}
+
+// ParsePublicKey tries to parse a public key in DER format first, then in
+// PEM format, and finally as a JWK, returning an error if all parsing failed
+func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
+ key, err := x509.ParsePKIXPublicKey(pubKey)
+ if err != nil {
+ block, _ := pem.Decode(pubKey)
+ if block != nil {
+ key, err = x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix)
+ }
+ } else {
+ key, err = parseJWKPublicKey(pubKey, prefix)
+ }
+ }
+ return key, err
+}
+
+// IsPublicKey returns true in case the given byte array represents a public key
+func IsPublicKey(data []byte) bool {
+ _, err := ParsePublicKey(data, "")
+ return err == nil
+}
+
+// ParseCertificate tries to parse an x509 certificate in DER format first and
+// PEM format after, returning an error if the parsing failed
+func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
+ x509Cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ block, _ := pem.Decode(certBytes)
+ if block == nil {
+ return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix)
+ }
+ x509Cert, err = x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix)
+ }
+ }
+ return x509Cert, err
+}
+
+// IsCertificate returns true in case the given byte array represents an x.509 certificate
+func IsCertificate(data []byte) bool {
+ _, err := ParseCertificate(data, "")
+ return err == nil
+}
+
+// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file
+func IsGPGPrivateKeyRing(data []byte) bool {
+ r := bytes.NewBuffer(data)
+ _, err := openpgp.ReadKeyRing(r)
+ return err == nil
+}
+
+// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into
+// a map. Each entry in the list may be either a GPG private key ring, private key, or x.509
+// certificate
+func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) {
+ dcparameters := make(map[string][][]byte)
+
+ for _, b64Item := range strings.Split(b64ItemList, ",") {
+ var password []byte
+ b64Data := strings.Split(b64Item, ":")
+ keyData, err := base64.StdEncoding.DecodeString(b64Data[0])
+ if err != nil {
+ return nil, errors.New("Could not base64 decode a passed decryption key")
+ }
+ if len(b64Data) == 2 {
+ password, err = base64.StdEncoding.DecodeString(b64Data[1])
+ if err != nil {
+ return nil, errors.New("Could not base64 decode a passed decryption key password")
+ }
+ }
+ var key string
+ isPrivKey, err := IsPrivateKey(keyData, password)
+ if IsPasswordError(err) {
+ return nil, err
+ }
+ if isPrivKey {
+ key = "privkeys"
+ if _, ok := dcparameters["privkeys-passwords"]; !ok {
+ dcparameters["privkeys-passwords"] = [][]byte{password}
+ } else {
+ dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password)
+ }
+ } else if IsCertificate(keyData) {
+ key = "x509s"
+ } else if IsGPGPrivateKeyRing(keyData) {
+ key = "gpg-privatekeys"
+ }
+ if key != "" {
+ values := dcparameters[key]
+ if values == nil {
+ dcparameters[key] = [][]byte{keyData}
+ } else {
+ dcparameters[key] = append(dcparameters[key], keyData)
+ }
+ } else {
+ return nil, errors.New("Unknown decryption key type")
+ }
+ }
+
+ return dcparameters, nil
+}
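
A sketch of feeding SortDecryptionKeys above: each comma-separated entry is base64(key data), optionally followed by ":" and base64(password). Here a freshly generated, unencrypted PEM private key is used, so no password part is needed.

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Unencrypted PEM private key generated with the test helper above.
	_, privPEM, err := utils.CreateRSATestKey(2048, nil, true)
	if err != nil {
		log.Fatal(err)
	}
	entry := base64.StdEncoding.EncodeToString(privPEM)

	dcparameters, err := utils.SortDecryptionKeys(entry)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("private keys sorted:", len(dcparameters["privkeys"]))
}
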
diff --git a/vendor/github.com/fullsailor/pkcs7/.gitignore b/vendor/github.com/fullsailor/pkcs7/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/fullsailor/pkcs7/.travis.yml b/vendor/github.com/fullsailor/pkcs7/.travis.yml
new file mode 100644
index 000000000..bc1204376
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+ - 1.8
+ - 1.9
+ - "1.10"
+ - tip
diff --git a/vendor/github.com/fullsailor/pkcs7/LICENSE b/vendor/github.com/fullsailor/pkcs7/LICENSE
new file mode 100644
index 000000000..75f320908
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Andrew Smith
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/fullsailor/pkcs7/README.md b/vendor/github.com/fullsailor/pkcs7/README.md
new file mode 100644
index 000000000..bfd948f32
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/README.md
@@ -0,0 +1,8 @@
+# pkcs7
+
+[![GoDoc](https://godoc.org/github.com/fullsailor/pkcs7?status.svg)](https://godoc.org/github.com/fullsailor/pkcs7)
+[![Build Status](https://travis-ci.org/fullsailor/pkcs7.svg?branch=master)](https://travis-ci.org/fullsailor/pkcs7)
+
+pkcs7 implements parsing and creating signed and enveloped messages.
+
+- Documentation on [GoDoc](http://godoc.org/github.com/fullsailor/pkcs7)
diff --git a/vendor/github.com/fullsailor/pkcs7/ber.go b/vendor/github.com/fullsailor/pkcs7/ber.go
new file mode 100644
index 000000000..89e96d30c
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/ber.go
@@ -0,0 +1,248 @@
+package pkcs7
+
+import (
+ "bytes"
+ "errors"
+)
+
+// var encodeIndent = 0
+
+type asn1Object interface {
+ EncodeTo(writer *bytes.Buffer) error
+}
+
+type asn1Structured struct {
+ tagBytes []byte
+ content []asn1Object
+}
+
+func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
+ //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
+ //encodeIndent++
+ inner := new(bytes.Buffer)
+ for _, obj := range s.content {
+ err := obj.EncodeTo(inner)
+ if err != nil {
+ return err
+ }
+ }
+ //encodeIndent--
+ out.Write(s.tagBytes)
+ encodeLength(out, inner.Len())
+ out.Write(inner.Bytes())
+ return nil
+}
+
+type asn1Primitive struct {
+ tagBytes []byte
+ length int
+ content []byte
+}
+
+func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
+ _, err := out.Write(p.tagBytes)
+ if err != nil {
+ return err
+ }
+ if err = encodeLength(out, p.length); err != nil {
+ return err
+ }
+ //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
+ //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
+ out.Write(p.content)
+
+ return nil
+}
+
+func ber2der(ber []byte) ([]byte, error) {
+ if len(ber) == 0 {
+ return nil, errors.New("ber2der: input ber is empty")
+ }
+ //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
+ out := new(bytes.Buffer)
+
+ obj, _, err := readObject(ber, 0)
+ if err != nil {
+ return nil, err
+ }
+ obj.EncodeTo(out)
+
+ // if offset < len(ber) {
+ // return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
+ //}
+
+ return out.Bytes(), nil
+}
+
+// encodes lengths that are longer than 127 into a string of bytes
+func marshalLongLength(out *bytes.Buffer, i int) (err error) {
+ n := lengthLength(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+// computes the byte length of an encoded length value
+func lengthLength(i int) (numBytes int) {
+ numBytes = 1
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+ return
+}
+
+// encodes the length in DER format
+// If the length fits in 7 bits, the value is encoded directly.
+//
+// Otherwise, the number of bytes to encode the length is first determined.
+// This number is likely to be 4 or less for a 32bit length. This number is
+// added to 0x80. The length is encoded in big-endian order and follows after it.
+//
+// Examples:
+// length | byte 1 | bytes n
+// 0 | 0x00 | -
+// 120 | 0x78 | -
+// 200 | 0x81 | 0xC8
+// 500 | 0x82 | 0x01 0xF4
+//
+func encodeLength(out *bytes.Buffer, length int) (err error) {
+ if length >= 128 {
+ l := lengthLength(length)
+ err = out.WriteByte(0x80 | byte(l))
+ if err != nil {
+ return
+ }
+ err = marshalLongLength(out, length)
+ if err != nil {
+ return
+ }
+ } else {
+ err = out.WriteByte(byte(length))
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+func readObject(ber []byte, offset int) (asn1Object, int, error) {
+ //fmt.Printf("\n====> Starting readObject at offset: %d\n\n", offset)
+ tagStart := offset
+ b := ber[offset]
+ offset++
+ tag := b & 0x1F // last 5 bits
+ if tag == 0x1F {
+ tag = 0
+ for ber[offset] >= 0x80 {
+ tag = tag*128 + ber[offset] - 0x80
+ offset++
+ }
+ tag = tag*128 + ber[offset] - 0x80
+ offset++
+ }
+ tagEnd := offset
+
+ kind := b & 0x20
+ /*
+ if kind == 0 {
+ fmt.Print("--> Primitive\n")
+ } else {
+ fmt.Print("--> Constructed\n")
+ }
+ */
+ // read length
+ var length int
+ l := ber[offset]
+ offset++
+ indefinite := false
+ if l > 0x80 {
+ numberOfBytes := (int)(l & 0x7F)
+ if numberOfBytes > 4 { // int is only guaranteed to be 32bit
+ return nil, 0, errors.New("ber2der: BER tag length too long")
+ }
+ if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
+ return nil, 0, errors.New("ber2der: BER tag length is negative")
+ }
+ if 0x0 == (int)(ber[offset]) {
+ return nil, 0, errors.New("ber2der: BER tag length has leading zero")
+ }
+ //fmt.Printf("--> (compute length) indicator byte: %x\n", l)
+ //fmt.Printf("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
+ for i := 0; i < numberOfBytes; i++ {
+ length = length*256 + (int)(ber[offset])
+ offset++
+ }
+ } else if l == 0x80 {
+ indefinite = true
+ } else {
+ length = (int)(l)
+ }
+
+ //fmt.Printf("--> length : %d\n", length)
+ contentEnd := offset + length
+ if contentEnd > len(ber) {
+ return nil, 0, errors.New("ber2der: BER tag length is more than available data")
+ }
+ //fmt.Printf("--> content start : %d\n", offset)
+ //fmt.Printf("--> content end : %d\n", contentEnd)
+ //fmt.Printf("--> content : % X\n", ber[offset:contentEnd])
+ var obj asn1Object
+ if indefinite && kind == 0 {
+ return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
+ }
+ if kind == 0 {
+ obj = asn1Primitive{
+ tagBytes: ber[tagStart:tagEnd],
+ length: length,
+ content: ber[offset:contentEnd],
+ }
+ } else {
+ var subObjects []asn1Object
+ for (offset < contentEnd) || indefinite {
+ var subObj asn1Object
+ var err error
+ subObj, offset, err = readObject(ber, offset)
+ if err != nil {
+ return nil, 0, err
+ }
+ subObjects = append(subObjects, subObj)
+
+ if indefinite {
+ terminated, err := isIndefiniteTermination(ber, offset)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if terminated {
+ break
+ }
+ }
+ }
+ obj = asn1Structured{
+ tagBytes: ber[tagStart:tagEnd],
+ content: subObjects,
+ }
+ }
+
+ // Apply indefinite form length with 0x0000 terminator.
+ if indefinite {
+ contentEnd = offset + 2
+ }
+
+ return obj, contentEnd, nil
+}
+
+func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
+ if len(ber) - offset < 2 {
+ return false, errors.New("ber2der: Invalid BER format")
+ }
+
+ return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
+}
diff --git a/vendor/github.com/fullsailor/pkcs7/pkcs7.go b/vendor/github.com/fullsailor/pkcs7/pkcs7.go
new file mode 100644
index 000000000..0264466b4
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/pkcs7.go
@@ -0,0 +1,962 @@
+// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
+package pkcs7
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+ "sort"
+ "time"
+
+ _ "crypto/sha1" // for crypto.SHA1
+)
+
+// PKCS7 Represents a PKCS7 structure
+type PKCS7 struct {
+ Content []byte
+ Certificates []*x509.Certificate
+ CRLs []pkix.CertificateList
+ Signers []signerInfo
+ raw interface{}
+}
+
+type contentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"explicit,optional,tag:0"`
+}
+
+// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
+// and Enveloped Data (1.2.840.113549.1.7.3) are supported
+var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
+
+type unsignedData []byte
+
+var (
+ oidData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
+ oidSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
+ oidEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
+ oidSignedAndEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 4}
+ oidDigestedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 5}
+ oidEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
+ oidAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
+ oidAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
+ oidAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
+)
+
+type signedData struct {
+ Version int `asn1:"default:1"`
+ DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
+ ContentInfo contentInfo
+ Certificates rawCertificates `asn1:"optional,tag:0"`
+ CRLs []pkix.CertificateList `asn1:"optional,tag:1"`
+ SignerInfos []signerInfo `asn1:"set"`
+}
+
+type rawCertificates struct {
+ Raw asn1.RawContent
+}
+
+type envelopedData struct {
+ Version int
+ RecipientInfos []recipientInfo `asn1:"set"`
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type recipientInfo struct {
+ Version int
+ IssuerAndSerialNumber issuerAndSerial
+ KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedKey []byte
+}
+
+type encryptedContentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent asn1.RawValue `asn1:"tag:0,optional"`
+}
+
+type attribute struct {
+ Type asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"set"`
+}
+
+type issuerAndSerial struct {
+ IssuerName asn1.RawValue
+ SerialNumber *big.Int
+}
+
+// MessageDigestMismatchError is returned when the signer data digest does not
+// match the computed digest for the contained content
+type MessageDigestMismatchError struct {
+ ExpectedDigest []byte
+ ActualDigest []byte
+}
+
+func (err *MessageDigestMismatchError) Error() string {
+ return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
+}
+
+type signerInfo struct {
+ Version int `asn1:"default:1"`
+ IssuerAndSerialNumber issuerAndSerial
+ DigestAlgorithm pkix.AlgorithmIdentifier
+ AuthenticatedAttributes []attribute `asn1:"optional,tag:0"`
+ DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedDigest []byte
+ UnauthenticatedAttributes []attribute `asn1:"optional,tag:1"`
+}
+
+// Parse decodes a DER encoded PKCS7 package
+func Parse(data []byte) (p7 *PKCS7, err error) {
+ if len(data) == 0 {
+ return nil, errors.New("pkcs7: input data is empty")
+ }
+ var info contentInfo
+ der, err := ber2der(data)
+ if err != nil {
+ return nil, err
+ }
+ rest, err := asn1.Unmarshal(der, &info)
+ if len(rest) > 0 {
+ err = asn1.SyntaxError{Msg: "trailing data"}
+ return
+ }
+ if err != nil {
+ return
+ }
+
+ // fmt.Printf("--> Content Type: %s", info.ContentType)
+ switch {
+ case info.ContentType.Equal(oidSignedData):
+ return parseSignedData(info.Content.Bytes)
+ case info.ContentType.Equal(oidEnvelopedData):
+ return parseEnvelopedData(info.Content.Bytes)
+ }
+ return nil, ErrUnsupportedContentType
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+ var sd signedData
+ asn1.Unmarshal(data, &sd)
+ certs, err := sd.Certificates.Parse()
+ if err != nil {
+ return nil, err
+ }
+ // fmt.Printf("--> Signed Data Version %d\n", sd.Version)
+
+ var compound asn1.RawValue
+ var content unsignedData
+
+ // The Content.Bytes may be empty on PKI responses.
+ if len(sd.ContentInfo.Content.Bytes) > 0 {
+ if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
+ return nil, err
+ }
+ }
+ // Compound octet string
+ if compound.IsCompound {
+ if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
+ return nil, err
+ }
+ } else {
+ // assuming this is tag 04
+ content = compound.Bytes
+ }
+ return &PKCS7{
+ Content: content,
+ Certificates: certs,
+ CRLs: sd.CRLs,
+ Signers: sd.SignerInfos,
+ raw: sd}, nil
+}
+
+func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
+ if len(raw.Raw) == 0 {
+ return nil, nil
+ }
+
+ var val asn1.RawValue
+ if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
+ return nil, err
+ }
+
+ return x509.ParseCertificates(val.Bytes)
+}
+
+func parseEnvelopedData(data []byte) (*PKCS7, error) {
+ var ed envelopedData
+ if _, err := asn1.Unmarshal(data, &ed); err != nil {
+ return nil, err
+ }
+ return &PKCS7{
+ raw: ed,
+ }, nil
+}
+
+// Verify checks the signatures of a PKCS7 object
+// WARNING: Verify does not check signing time or verify certificate chains at
+// this time.
+func (p7 *PKCS7) Verify() (err error) {
+ if len(p7.Signers) == 0 {
+ return errors.New("pkcs7: Message has no signers")
+ }
+ for _, signer := range p7.Signers {
+ if err := verifySignature(p7, signer); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func verifySignature(p7 *PKCS7, signer signerInfo) error {
+ signedData := p7.Content
+ hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+ if err != nil {
+ return err
+ }
+ if len(signer.AuthenticatedAttributes) > 0 {
+ // TODO(fullsailor): First check the content type match
+ var digest []byte
+ err := unmarshalAttribute(signer.AuthenticatedAttributes, oidAttributeMessageDigest, &digest)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
+ h.Write(p7.Content)
+ computed := h.Sum(nil)
+ if !hmac.Equal(digest, computed) {
+ return &MessageDigestMismatchError{
+ ExpectedDigest: digest,
+ ActualDigest: computed,
+ }
+ }
+ // TODO(fullsailor): Optionally verify certificate chain
+ // TODO(fullsailor): Optionally verify signingTime against certificate NotAfter/NotBefore
+ signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+ if err != nil {
+ return err
+ }
+ }
+ cert := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+ if cert == nil {
+ return errors.New("pkcs7: No certificate for signer")
+ }
+
+ algo := getSignatureAlgorithmFromAI(signer.DigestEncryptionAlgorithm)
+ if algo == x509.UnknownSignatureAlgorithm {
+ // I'm not sure what the spec here is, and the openssl sources were not
+ // helpful. But, this is what App Store receipts appear to do.
+ // The DigestEncryptionAlgorithm is just "rsaEncryption (PKCS #1)"
+ // But we're expecting a digest + encryption algorithm. So... we're going
+ // to determine an algorithm based on the DigestAlgorithm and this
+ // encryption algorithm.
+ if signer.DigestEncryptionAlgorithm.Algorithm.Equal(oidEncryptionAlgorithmRSA) {
+ algo = getRSASignatureAlgorithmForDigestAlgorithm(hash)
+ }
+ }
+ return cert.CheckSignature(algo, signedData, signer.EncryptedDigest)
+}
+
+func marshalAttributes(attrs []attribute) ([]byte, error) {
+ encodedAttributes, err := asn1.Marshal(struct {
+ A []attribute `asn1:"set"`
+ }{A: attrs})
+ if err != nil {
+ return nil, err
+ }
+
+ // Remove the leading sequence octets
+ var raw asn1.RawValue
+ asn1.Unmarshal(encodedAttributes, &raw)
+ return raw.Bytes, nil
+}
+
+var (
+ oidDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+ oidEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+)
+
+func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
+ for _, cert := range certs {
+ if isCertMatchForIssuerAndSerial(cert, ias) {
+ return cert
+ }
+ }
+ return nil
+}
+
+func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
+ switch {
+ case oid.Equal(oidDigestAlgorithmSHA1):
+ return crypto.SHA1, nil
+ case oid.Equal(oidSHA256):
+ return crypto.SHA256, nil
+ }
+ return crypto.Hash(0), ErrUnsupportedAlgorithm
+}
+
+func getRSASignatureAlgorithmForDigestAlgorithm(hash crypto.Hash) x509.SignatureAlgorithm {
+ for _, details := range signatureAlgorithmDetails {
+ if details.pubKeyAlgo == x509.RSA && details.hash == hash {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there is not exactly one signer, nil is returned
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+ if len(p7.Signers) != 1 {
+ return nil
+ }
+ signer := p7.Signers[0]
+ return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
+var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
+
+// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
+var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
+
+// Decrypt decrypts encrypted content info for recipient cert and private key
+func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte, error) {
+ data, ok := p7.raw.(envelopedData)
+ if !ok {
+ return nil, ErrNotEncryptedContent
+ }
+ recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
+ if recipient.EncryptedKey == nil {
+ return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
+ }
+ if priv, ok := pk.(*rsa.PrivateKey); ok {
+ var contentKey []byte
+ contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, priv, recipient.EncryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return data.EncryptedContentInfo.decrypt(contentKey)
+ }
+ fmt.Printf("Unsupported Private Key: %v\n", pk)
+ return nil, ErrUnsupportedAlgorithm
+}
+
+var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
+var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
+var oidEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+var oidEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
+var oidEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
+
+func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
+ alg := eci.ContentEncryptionAlgorithm.Algorithm
+ if !alg.Equal(oidEncryptionAlgorithmDESCBC) &&
+ !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) &&
+ !alg.Equal(oidEncryptionAlgorithmAES256CBC) &&
+ !alg.Equal(oidEncryptionAlgorithmAES128CBC) &&
+ !alg.Equal(oidEncryptionAlgorithmAES128GCM) {
+ fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ // EncryptedContent can either be constructed of multiple OCTET STRINGs
+ // or _be_ a tagged OCTET STRING
+ var cyphertext []byte
+ if eci.EncryptedContent.IsCompound {
+ // Complex case to concatenate all of the child OCTET STRINGs
+ var buf bytes.Buffer
+ cypherbytes := eci.EncryptedContent.Bytes
+ for {
+ var part []byte
+ cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
+ buf.Write(part)
+ if cypherbytes == nil {
+ break
+ }
+ }
+ cyphertext = buf.Bytes()
+ } else {
+ // Simple case, the bytes _are_ the cyphertext
+ cyphertext = eci.EncryptedContent.Bytes
+ }
+
+ var block cipher.Block
+ var err error
+
+ switch {
+ case alg.Equal(oidEncryptionAlgorithmDESCBC):
+ block, err = des.NewCipher(key)
+ case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
+ block, err = des.NewTripleDESCipher(key)
+ case alg.Equal(oidEncryptionAlgorithmAES256CBC):
+ fallthrough
+ case alg.Equal(oidEncryptionAlgorithmAES128GCM), alg.Equal(oidEncryptionAlgorithmAES128CBC):
+ block, err = aes.NewCipher(key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if alg.Equal(oidEncryptionAlgorithmAES128GCM) {
+ params := aesGCMParameters{}
+ paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+
+ _, err := asn1.Unmarshal(paramBytes, &params)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(params.Nonce) != gcm.NonceSize() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+ }
+ if params.ICVLen != gcm.Overhead() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+ }
+
+ plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return plaintext, nil
+ }
+
+ iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+ if len(iv) != block.BlockSize() {
+ return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
+ }
+ mode := cipher.NewCBCDecrypter(block, iv)
+ plaintext := make([]byte, len(cyphertext))
+ mode.CryptBlocks(plaintext, cyphertext)
+ if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
+ return nil, err
+ }
+ return plaintext, nil
+}
+
+func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
+ for _, recp := range recipients {
+ if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
+ return recp
+ }
+ }
+ return recipientInfo{}
+}
+
+func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
+ return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Compare(cert.RawIssuer, ias.IssuerName.FullBytes) == 0
+}
+
+func pad(data []byte, blocklen int) ([]byte, error) {
+ if blocklen < 1 {
+ return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+ }
+ padlen := blocklen - (len(data) % blocklen)
+ if padlen == 0 {
+ padlen = blocklen
+ }
+ pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
+ return append(data, pad...), nil
+}
+
+func unpad(data []byte, blocklen int) ([]byte, error) {
+ if blocklen < 1 {
+ return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+ }
+ if len(data)%blocklen != 0 || len(data) == 0 {
+ return nil, fmt.Errorf("invalid data len %d", len(data))
+ }
+
+ // the last byte is the length of padding
+ padlen := int(data[len(data)-1])
+
+ // check padding integrity, all bytes should be the same
+ pad := data[len(data)-padlen:]
+ for _, padbyte := range pad {
+ if padbyte != byte(padlen) {
+ return nil, errors.New("invalid padding")
+ }
+ }
+
+ return data[:len(data)-padlen], nil
+}
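A minimal package-internal test sketch of the padding rule implemented by pad and unpad above (N bytes of value N, with a full extra block appended when the input is already block-aligned); this is illustrative only and not part of the vendored change.

package pkcs7

import (
    "bytes"
    "testing"
)

// TestPadUnpadSketch exercises the CBC padding helpers above: three 0x03
// bytes are appended to a 5-byte input for an 8-byte block size, and unpad
// strips them again after checking every pad byte carries the pad length.
func TestPadUnpadSketch(t *testing.T) {
    padded, err := pad([]byte("HELLO"), 8)
    if err != nil {
        t.Fatal(err)
    }
    if !bytes.Equal(padded[5:], []byte{3, 3, 3}) {
        t.Fatalf("unexpected padding: %x", padded[5:])
    }
    unpadded, err := unpad(padded, 8)
    if err != nil || !bytes.Equal(unpadded, []byte("HELLO")) {
        t.Fatalf("round trip failed: %q, %v", unpadded, err)
    }
}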
+
+func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error {
+ for _, attr := range attrs {
+ if attr.Type.Equal(attributeType) {
+ _, err := asn1.Unmarshal(attr.Value.Bytes, out)
+ return err
+ }
+ }
+ return errors.New("pkcs7: attribute type not in attributes")
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+ sd, ok := p7.raw.(signedData)
+ if !ok {
+ return errors.New("pkcs7: payload is not signedData content")
+ }
+ if len(sd.SignerInfos) < 1 {
+ return errors.New("pkcs7: payload has no signers")
+ }
+ attributes := sd.SignerInfos[0].AuthenticatedAttributes
+ return unmarshalAttribute(attributes, attributeType, out)
+}
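A minimal sketch of the verification side of this API. Parse and Verify are assumed from the portion of this vendored file above this hunk; GetOnlySigner is the helper added above.

package example

import (
    "errors"

    "github.com/fullsailor/pkcs7"
)

// checkSignature parses a DER-encoded signedData blob, requires exactly one
// signer, and verifies the signature.
func checkSignature(der []byte) error {
    p7, err := pkcs7.Parse(der) // assumed: defined earlier in this file
    if err != nil {
        return err
    }
    if p7.GetOnlySigner() == nil {
        return errors.New("expected exactly one signer")
    }
    return p7.Verify()
}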
+
+// SignedData is an opaque data structure for creating signed data payloads
+type SignedData struct {
+ sd signedData
+ certs []*x509.Certificate
+ messageDigest []byte
+}
+
+// Attribute represents a key-value pair attribute. Value must be marshalable by
+// `encoding/asn1`
+type Attribute struct {
+ Type asn1.ObjectIdentifier
+ Value interface{}
+}
+
+// SignerInfoConfig are optional values to include when adding a signer
+type SignerInfoConfig struct {
+ ExtraSignedAttributes []Attribute
+}
+
+// NewSignedData initializes a SignedData with content
+func NewSignedData(data []byte) (*SignedData, error) {
+ content, err := asn1.Marshal(data)
+ if err != nil {
+ return nil, err
+ }
+ ci := contentInfo{
+ ContentType: oidData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+ }
+ digAlg := pkix.AlgorithmIdentifier{
+ Algorithm: oidDigestAlgorithmSHA1,
+ }
+ h := crypto.SHA1.New()
+ h.Write(data)
+ md := h.Sum(nil)
+ sd := signedData{
+ ContentInfo: ci,
+ Version: 1,
+ DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{digAlg},
+ }
+ return &SignedData{sd: sd, messageDigest: md}, nil
+}
+
+type attributes struct {
+ types []asn1.ObjectIdentifier
+ values []interface{}
+}
+
+// Add adds the attribute, maintaining insertion order
+func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
+ attrs.types = append(attrs.types, attrType)
+ attrs.values = append(attrs.values, value)
+}
+
+type sortableAttribute struct {
+ SortKey []byte
+ Attribute attribute
+}
+
+type attributeSet []sortableAttribute
+
+func (sa attributeSet) Len() int {
+ return len(sa)
+}
+
+func (sa attributeSet) Less(i, j int) bool {
+ return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
+}
+
+func (sa attributeSet) Swap(i, j int) {
+ sa[i], sa[j] = sa[j], sa[i]
+}
+
+func (sa attributeSet) Attributes() []attribute {
+ attrs := make([]attribute, len(sa))
+ for i, attr := range sa {
+ attrs[i] = attr.Attribute
+ }
+ return attrs
+}
+
+func (attrs *attributes) ForMarshaling() ([]attribute, error) {
+ sortables := make(attributeSet, len(attrs.types))
+ for i := range sortables {
+ attrType := attrs.types[i]
+ attrValue := attrs.values[i]
+ asn1Value, err := asn1.Marshal(attrValue)
+ if err != nil {
+ return nil, err
+ }
+ attr := attribute{
+ Type: attrType,
+ Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
+ }
+ encoded, err := asn1.Marshal(attr)
+ if err != nil {
+ return nil, err
+ }
+ sortables[i] = sortableAttribute{
+ SortKey: encoded,
+ Attribute: attr,
+ }
+ }
+ sort.Sort(sortables)
+ return sortables.Attributes(), nil
+}
+
+// AddSigner signs attributes about the content and adds certificate to payload
+func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+ attrs := &attributes{}
+ attrs.Add(oidAttributeContentType, sd.sd.ContentInfo.ContentType)
+ attrs.Add(oidAttributeMessageDigest, sd.messageDigest)
+ attrs.Add(oidAttributeSigningTime, time.Now())
+ for _, attr := range config.ExtraSignedAttributes {
+ attrs.Add(attr.Type, attr.Value)
+ }
+ finalAttrs, err := attrs.ForMarshaling()
+ if err != nil {
+ return err
+ }
+ signature, err := signAttributes(finalAttrs, pkey, crypto.SHA1)
+ if err != nil {
+ return err
+ }
+
+ ias, err := cert2issuerAndSerial(cert)
+ if err != nil {
+ return err
+ }
+
+ signer := signerInfo{
+ AuthenticatedAttributes: finalAttrs,
+ DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1},
+ DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA1WithRSA},
+ IssuerAndSerialNumber: ias,
+ EncryptedDigest: signature,
+ Version: 1,
+ }
+ // create signature of signed attributes
+ sd.certs = append(sd.certs, cert)
+ sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+ return nil
+}
+
+// AddCertificate adds the certificate to the payload. Useful for parent certificates
+func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
+ sd.certs = append(sd.certs, cert)
+}
+
+// Detach removes content from the signed data struct to make it a detached signature.
+// This must be called right before Finish()
+func (sd *SignedData) Detach() {
+ sd.sd.ContentInfo = contentInfo{ContentType: oidData}
+}
+
+// Finish marshals the content and its signers
+func (sd *SignedData) Finish() ([]byte, error) {
+ sd.sd.Certificates = marshalCertificates(sd.certs)
+ inner, err := asn1.Marshal(sd.sd)
+ if err != nil {
+ return nil, err
+ }
+ outer := contentInfo{
+ ContentType: oidSignedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
+ }
+ return asn1.Marshal(outer)
+}
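A minimal sketch of the signing flow provided by the exported API above (NewSignedData, AddSigner, optional Detach, Finish). The certificate and RSA key are assumed to be supplied by the caller, and the digest defaults to SHA-1 as hard-coded above.

package example

import (
    "crypto/rsa"
    "crypto/x509"

    "github.com/fullsailor/pkcs7"
)

// signDetached produces a detached PKCS#7 signature over payload. cert and
// key are assumed to be loaded by the caller, e.g. from PEM files.
func signDetached(payload []byte, cert *x509.Certificate, key *rsa.PrivateKey) ([]byte, error) {
    sd, err := pkcs7.NewSignedData(payload)
    if err != nil {
        return nil, err
    }
    if err := sd.AddSigner(cert, key, pkcs7.SignerInfoConfig{}); err != nil {
        return nil, err
    }
    sd.Detach() // drop the content so only the signature is emitted
    return sd.Finish()
}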
+
+func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
+ var ias issuerAndSerial
+ // The issuer RDNSequence has to match exactly the sequence in the certificate
+ // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
+ ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
+ ias.SerialNumber = cert.SerialNumber
+
+ return ias, nil
+}
+
+// signs the DER encoded form of the attributes with the private key
+func signAttributes(attrs []attribute, pkey crypto.PrivateKey, hash crypto.Hash) ([]byte, error) {
+ attrBytes, err := marshalAttributes(attrs)
+ if err != nil {
+ return nil, err
+ }
+ h := hash.New()
+ h.Write(attrBytes)
+ hashed := h.Sum(nil)
+ switch priv := pkey.(type) {
+ case *rsa.PrivateKey:
+ return rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA1, hashed)
+ }
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// concatenates and wraps the certificates in the RawValue structure
+func marshalCertificates(certs []*x509.Certificate) rawCertificates {
+ var buf bytes.Buffer
+ for _, cert := range certs {
+ buf.Write(cert.Raw)
+ }
+ rawCerts, _ := marshalCertificateBytes(buf.Bytes())
+ return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshaling of the
+// RawContent, we have to encode it into the RawContent. If it's missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+ var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+ b, err := asn1.Marshal(val)
+ if err != nil {
+ return rawCertificates{}, err
+ }
+ return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+ rawCert, err := marshalCertificateBytes(cert)
+ if err != nil {
+ return nil, err
+ }
+ emptyContent := contentInfo{ContentType: oidData}
+ sd := signedData{
+ Version: 1,
+ ContentInfo: emptyContent,
+ Certificates: rawCert,
+ CRLs: []pkix.CertificateList{},
+ }
+ content, err := asn1.Marshal(sd)
+ if err != nil {
+ return nil, err
+ }
+ signedContent := contentInfo{
+ ContentType: oidSignedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+ }
+ return asn1.Marshal(signedContent)
+}
+
+const (
+ EncryptionAlgorithmDESCBC = iota
+ EncryptionAlgorithmAES128GCM
+)
+
+// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
+// plaintext message. Change the value of this variable to change which
+// algorithm is used in the Encrypt() function.
+var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
+
+// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
+// content with an unsupported algorithm.
+var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC and AES-128-GCM supported")
+
+const nonceSize = 12
+
+type aesGCMParameters struct {
+ Nonce []byte `asn1:"tag:4"`
+ ICVLen int
+}
+
+func encryptAES128GCM(content []byte) ([]byte, *encryptedContentInfo, error) {
+ // Create AES key and nonce
+ key := make([]byte, 16)
+ nonce := make([]byte, nonceSize)
+
+ _, err := rand.Read(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ _, err = rand.Read(nonce)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Encrypt content
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ciphertext := gcm.Seal(nil, nonce, content, nil)
+
+ // Prepare ASN.1 Encrypted Content Info
+ paramSeq := aesGCMParameters{
+ Nonce: nonce,
+ ICVLen: gcm.Overhead(),
+ }
+
+ paramBytes, err := asn1.Marshal(paramSeq)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ eci := encryptedContentInfo{
+ ContentType: oidData,
+ ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: oidEncryptionAlgorithmAES128GCM,
+ Parameters: asn1.RawValue{
+ Tag: asn1.TagSequence,
+ Bytes: paramBytes,
+ },
+ },
+ EncryptedContent: marshalEncryptedContent(ciphertext),
+ }
+
+ return key, &eci, nil
+}
+
+func encryptDESCBC(content []byte) ([]byte, *encryptedContentInfo, error) {
+ // Create DES key & CBC IV
+ key := make([]byte, 8)
+ iv := make([]byte, des.BlockSize)
+ _, err := rand.Read(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ _, err = rand.Read(iv)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Encrypt padded content
+ block, err := des.NewCipher(key)
+ if err != nil {
+ return nil, nil, err
+ }
+ mode := cipher.NewCBCEncrypter(block, iv)
+ plaintext, err := pad(content, mode.BlockSize())
+ if err != nil {
+ return nil, nil, err
+ }
+ cyphertext := make([]byte, len(plaintext))
+ mode.CryptBlocks(cyphertext, plaintext)
+
+ // Prepare ASN.1 Encrypted Content Info
+ eci := encryptedContentInfo{
+ ContentType: oidData,
+ ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: oidEncryptionAlgorithmDESCBC,
+ Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
+ },
+ EncryptedContent: marshalEncryptedContent(cyphertext),
+ }
+
+ return key, &eci, nil
+}
+
+// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
+// recipient keys for each recipient public key.
+//
+// The algorithm used to perform encryption is determined by the current value
+// of the global ContentEncryptionAlgorithm package variable. By default, the
+// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
+// value before calling Encrypt(). For example:
+//
+// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
+//
+// TODO(fullsailor): Add support for encrypting content with other algorithms
+func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
+ var eci *encryptedContentInfo
+ var key []byte
+ var err error
+
+ // Apply chosen symmetric encryption method
+ switch ContentEncryptionAlgorithm {
+ case EncryptionAlgorithmDESCBC:
+ key, eci, err = encryptDESCBC(content)
+
+ case EncryptionAlgorithmAES128GCM:
+ key, eci, err = encryptAES128GCM(content)
+
+ default:
+ return nil, ErrUnsupportedEncryptionAlgorithm
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare each recipient's encrypted cipher key
+ recipientInfos := make([]recipientInfo, len(recipients))
+ for i, recipient := range recipients {
+ encrypted, err := encryptKey(key, recipient)
+ if err != nil {
+ return nil, err
+ }
+ ias, err := cert2issuerAndSerial(recipient)
+ if err != nil {
+ return nil, err
+ }
+ info := recipientInfo{
+ Version: 0,
+ IssuerAndSerialNumber: ias,
+ KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: oidEncryptionAlgorithmRSA,
+ },
+ EncryptedKey: encrypted,
+ }
+ recipientInfos[i] = info
+ }
+
+ // Prepare envelope content
+ envelope := envelopedData{
+ EncryptedContentInfo: *eci,
+ Version: 0,
+ RecipientInfos: recipientInfos,
+ }
+ innerContent, err := asn1.Marshal(envelope)
+ if err != nil {
+ return nil, err
+ }
+
+ // Prepare outer payload structure
+ wrapper := contentInfo{
+ ContentType: oidEnvelopedData,
+ Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+ }
+
+ return asn1.Marshal(wrapper)
+}
+
+func marshalEncryptedContent(content []byte) asn1.RawValue {
+ asn1Content, _ := asn1.Marshal(content)
+ return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
+}
+
+func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
+ if pub, ok := recipient.PublicKey.(*rsa.PublicKey); ok {
+ return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
+ }
+ return nil, ErrUnsupportedAlgorithm
+}
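A minimal sketch of the enveloped-data path added above, switching the package-level ContentEncryptionAlgorithm from the DES-CBC default to AES-128-GCM as the doc comment on Encrypt describes. Parse is assumed from earlier in this vendored file.

package example

import (
    "crypto/rsa"
    "crypto/x509"

    "github.com/fullsailor/pkcs7"
)

// seal encrypts msg for a single recipient using AES-128-GCM rather than the
// DES-CBC default.
func seal(msg []byte, recipient *x509.Certificate) ([]byte, error) {
    pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
    return pkcs7.Encrypt(msg, []*x509.Certificate{recipient})
}

// open decrypts the enveloped blob with the recipient's certificate and RSA key.
func open(der []byte, cert *x509.Certificate, key *rsa.PrivateKey) ([]byte, error) {
    p7, err := pkcs7.Parse(der) // assumed: defined earlier in this file
    if err != nil {
        return nil, err
    }
    return p7.Decrypt(cert, key)
}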
diff --git a/vendor/github.com/fullsailor/pkcs7/x509.go b/vendor/github.com/fullsailor/pkcs7/x509.go
new file mode 100644
index 000000000..195fd0e4b
--- /dev/null
+++ b/vendor/github.com/fullsailor/pkcs7/x509.go
@@ -0,0 +1,133 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the go/golang LICENSE file.
+
+package pkcs7
+
+// These are private constants and functions from the crypto/x509 package that
+// are useful when dealing with signatures verified by x509 certificates
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+)
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+
+ oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+ oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+ oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+ oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
+
+ // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
+ // but it's specified by ISO. Microsoft's makecert.exe has been known
+ // to produce certificates with this OID.
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
+)
+
+var signatureAlgorithmDetails = []struct {
+ algo x509.SignatureAlgorithm
+ name string
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo x509.PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+ {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+ {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+ {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512},
+ {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+ {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+ {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+ {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+ {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+ {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// pssParameters reflects the parameters in an AlgorithmIdentifier that
+// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3
+type pssParameters struct {
+ // The following three fields are not marked as
+ // optional because the default values specify SHA-1,
+ // which is no longer suitable for use in signatures.
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
+ MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
+ SaltLength int `asn1:"explicit,tag:2"`
+ TrailerField int `asn1:"optional,explicit,tag:3,default:1"`
+}
+
+// asn1.NullBytes is not available prior to Go 1.9
+var nullBytes = []byte{5, 0}
+
+func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) x509.SignatureAlgorithm {
+ if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
+ for _, details := range signatureAlgorithmDetails {
+ if ai.Algorithm.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+ }
+
+ // RSA PSS is special because it encodes important parameters
+ // in the Parameters.
+
+ var params pssParameters
+ if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
+ return x509.UnknownSignatureAlgorithm
+ }
+
+ var mgf1HashFunc pkix.AlgorithmIdentifier
+ if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
+ return x509.UnknownSignatureAlgorithm
+ }
+
+ // PSS is greatly overburdened with options. This code forces
+ // them into three buckets by requiring that the MGF1 hash
+ // function always match the message hash function (as
+ // recommended in
+ // https://tools.ietf.org/html/rfc3447#section-8.1), that the
+ // salt length matches the hash length, and that the trailer
+ // field has the default value.
+ if !bytes.Equal(params.Hash.Parameters.FullBytes, nullBytes) ||
+ !params.MGF.Algorithm.Equal(oidMGF1) ||
+ !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
+ !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, nullBytes) ||
+ params.TrailerField != 1 {
+ return x509.UnknownSignatureAlgorithm
+ }
+
+ switch {
+ case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
+ return x509.SHA256WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
+ return x509.SHA384WithRSAPSS
+ case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
+ return x509.SHA512WithRSAPSS
+ }
+
+ return x509.UnknownSignatureAlgorithm
+}
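An illustrative package-internal test sketch (not part of the vendored change) of how getSignatureAlgorithmFromAI resolves a plain, non-PSS AlgorithmIdentifier through the signatureAlgorithmDetails table above.

package pkcs7

import (
    "crypto/x509"
    "crypto/x509/pkix"
    "testing"
)

// TestSignatureAlgorithmMapping checks that a plain SHA256-with-RSA
// AlgorithmIdentifier resolves through the lookup table above.
func TestSignatureAlgorithmMapping(t *testing.T) {
    ai := pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA256WithRSA}
    if got := getSignatureAlgorithmFromAI(ai); got != x509.SHA256WithRSA {
        t.Fatalf("expected x509.SHA256WithRSA, got %v", got)
    }
}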
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
index b13a50ed1..dad29725f 100644
--- a/vendor/github.com/imdario/mergo/.travis.yml
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -4,4 +4,6 @@ install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
+ - go test -race -v ./...
+after_script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
index f8de6c543..3fb6c64d0 100644
--- a/vendor/github.com/imdario/mergo/merge.go
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -26,10 +26,12 @@ func hasExportedField(dst reflect.Value) (exported bool) {
}
type Config struct {
- Overwrite bool
- AppendSlice bool
- Transformers Transformers
- overwriteWithEmptyValue bool
+ Overwrite bool
+ AppendSlice bool
+ TypeCheck bool
+ Transformers Transformers
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
}
type Transformers interface {
@@ -41,7 +43,9 @@ type Transformers interface {
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
config.overwriteWithEmptyValue = false
if !src.IsValid() {
@@ -128,11 +132,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
- if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
}
@@ -143,7 +150,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
continue
}
- if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
@@ -154,7 +161,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if !dst.CanSet() {
break
}
- if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
@@ -168,11 +175,21 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if src.IsNil() {
break
}
- if src.Kind() != reflect.Interface {
+
+ if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
} else if src.Kind() == reflect.Ptr {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
@@ -198,6 +215,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.Set(src)
}
}
+
return
}
@@ -209,7 +227,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, opts...)
}
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
@@ -228,11 +246,21 @@ func WithOverride(config *Config) {
config.Overwrite = true
}
-// WithAppendSlice will make merge append slices instead of overwriting it
+// WithOverrideEmptySlice will make merge override an empty dst slice with an empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
func WithAppendSlice(config *Config) {
config.AppendSlice = true
}
+// WithTypeCheck will make merge check types while overwriting them (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
func merge(dst, src interface{}, opts ...func(*Config)) error {
var (
vDst, vSrc reflect.Value
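A minimal usage sketch of the options added to mergo in this hunk: WithTypeCheck makes slice overwrites fail when the source and destination types differ, and it is meant to be combined with WithOverride.

package main

import (
    "fmt"
    "log"

    "github.com/imdario/mergo"
)

type config struct {
    Hosts []string
    Port  int
}

func main() {
    dst := config{Hosts: []string{"a"}, Port: 80}
    src := config{Hosts: []string{"b", "c"}, Port: 8080}

    // With WithOverride, non-empty src fields replace dst fields; WithTypeCheck
    // additionally rejects slice overwrites whose types differ.
    if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithTypeCheck); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", dst) // {Hosts:[b c] Port:8080}
}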
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
deleted file mode 100644
index 5597e026d..000000000
--- a/vendor/github.com/mattn/go-isatty/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-go:
- - tip
-
-os:
- - linux
- - osx
-
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
deleted file mode 100644
index 65dc692b6..000000000
--- a/vendor/github.com/mattn/go-isatty/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
deleted file mode 100644
index 1e69004bb..000000000
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# go-isatty
-
-[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
-[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Cygwin/MSYS2 Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-## License
-
-MIT
-
-## Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
-
-## Thanks
-
-* k-takata: base idea for IsCygwinTerminal
-
- https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
deleted file mode 100644
index 17d4f90eb..000000000
--- a/vendor/github.com/mattn/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements interface to isatty
-package isatty
diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod
deleted file mode 100644
index f310320c3..000000000
--- a/vendor/github.com/mattn/go-isatty/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mattn/go-isatty
-
-require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223
diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum
deleted file mode 100644
index 426c8973c..000000000
--- a/vendor/github.com/mattn/go-isatty/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go
deleted file mode 100644
index d3567cb5b..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_android.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build android
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
deleted file mode 100644
index 07e93039d..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
deleted file mode 100644
index ff714a376..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build appengine js nacl
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on js and appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
deleted file mode 100644
index bdd5c79a0..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
deleted file mode 100644
index 453b025d0..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build linux aix
-// +build !appengine
-// +build !android
-
-package isatty
-
-import "golang.org/x/sys/unix"
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
- return err == nil
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
deleted file mode 100644
index af51cbcaa..000000000
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
- "strings"
- "syscall"
- "unicode/utf16"
- "unsafe"
-)
-
-const (
- fileNameInfo uintptr = 2
- fileTypePipe = 3
-)
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
- procGetFileType = kernel32.NewProc("GetFileType")
-)
-
-func init() {
- // Check if GetFileInformationByHandleEx is available.
- if procGetFileInformationByHandleEx.Find() != nil {
- procGetFileInformationByHandleEx = nil
- }
-}
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// Check pipe name is used for cygwin/msys2 pty.
-// Cygwin/MSYS2 PTY has a name like:
-// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
-func isCygwinPipeName(name string) bool {
- token := strings.Split(name, "-")
- if len(token) < 5 {
- return false
- }
-
- if token[0] != `\msys` && token[0] != `\cygwin` {
- return false
- }
-
- if token[1] == "" {
- return false
- }
-
- if !strings.HasPrefix(token[2], "pty") {
- return false
- }
-
- if token[3] != `from` && token[3] != `to` {
- return false
- }
-
- if token[4] != "master" {
- return false
- }
-
- return true
-}
-
-// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
-// terminal.
-func IsCygwinTerminal(fd uintptr) bool {
- if procGetFileInformationByHandleEx == nil {
- return false
- }
-
- // Cygwin/msys's pty is a pipe.
- ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
- if ft != fileTypePipe || e != 0 {
- return false
- }
-
- var buf [2 + syscall.MAX_PATH]uint16
- r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
- 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
- uintptr(len(buf)*2), 0, 0)
- if r == 0 || e != 0 {
- return false
- }
-
- l := *(*uint32)(unsafe.Pointer(&buf))
- return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
-}
diff --git a/vendor/github.com/vbauerster/mpb/.travis.yml b/vendor/github.com/vbauerster/mpb/.travis.yml
deleted file mode 100644
index c982d1f90..000000000
--- a/vendor/github.com/vbauerster/mpb/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-sudo: false
-go:
- - 1.10.x
- - tip
-
-before_install:
- - go get -t -v ./...
-
-script:
- - go test -race -coverprofile=coverage.txt -covermode=atomic
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/vbauerster/mpb/LICENSE b/vendor/github.com/vbauerster/mpb/LICENSE
deleted file mode 100644
index 5e68ed21e..000000000
--- a/vendor/github.com/vbauerster/mpb/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (C) 2016-2018 Vladimir Bauer
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbauerster/mpb/bar.go b/vendor/github.com/vbauerster/mpb/bar.go
deleted file mode 100644
index a304a87cb..000000000
--- a/vendor/github.com/vbauerster/mpb/bar.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package mpb
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "sync"
- "time"
- "unicode/utf8"
-
- "github.com/vbauerster/mpb/decor"
-)
-
-// Bar represents a progress Bar
-type Bar struct {
- priority int
- index int
-
- runningBar *Bar
- cacheState *bState
- operateState chan func(*bState)
- int64Ch chan int64
- boolCh chan bool
- frameReaderCh chan *frameReader
- syncTableCh chan [][]chan int
-
- // done is closed by Bar's goroutine, after cacheState is written
- done chan struct{}
- // shutdown is closed from master Progress goroutine only
- shutdown chan struct{}
-}
-
-// Filler interface.
-// Bar renders by calling Filler's Fill method. You can literally have
-// any bar kind, by implementing this interface and passing it to the
-// Add method.
-type Filler interface {
- Fill(w io.Writer, width int, s *decor.Statistics)
-}
-
-// FillerFunc is function type adapter to convert function into Filler.
-type FillerFunc func(w io.Writer, width int, stat *decor.Statistics)
-
-func (f FillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) {
- f(w, width, stat)
-}
-
-type (
- bState struct {
- filler Filler
- id int
- width int
- alignment int
- total int64
- current int64
- trimSpace bool
- toComplete bool
- removeOnComplete bool
- barClearOnComplete bool
- completeFlushed bool
- aDecorators []decor.Decorator
- pDecorators []decor.Decorator
- amountReceivers []decor.AmountReceiver
- shutdownListeners []decor.ShutdownListener
- refill *refill
- bufP, bufB, bufA *bytes.Buffer
- bufNL *bytes.Buffer
- panicMsg string
- newLineExtendFn func(io.Writer, *decor.Statistics)
-
- // following options are assigned to the *Bar
- priority int
- runningBar *Bar
- }
- refill struct {
- r rune
- limit int64
- }
- frameReader struct {
- io.Reader
- extendedLines int
- toShutdown bool
- removeOnComplete bool
- }
-)
-
-func newBar(
- ctx context.Context,
- wg *sync.WaitGroup,
- filler Filler,
- id, width int,
- total int64,
- options ...BarOption,
-) *Bar {
-
- s := &bState{
- filler: filler,
- id: id,
- priority: id,
- width: width,
- total: total,
- }
-
- for _, opt := range options {
- if opt != nil {
- opt(s)
- }
- }
-
- s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
- s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
- s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
- if s.newLineExtendFn != nil {
- s.bufNL = bytes.NewBuffer(make([]byte, 0, s.width))
- }
-
- b := &Bar{
- priority: s.priority,
- runningBar: s.runningBar,
- operateState: make(chan func(*bState)),
- int64Ch: make(chan int64),
- boolCh: make(chan bool),
- frameReaderCh: make(chan *frameReader, 1),
- syncTableCh: make(chan [][]chan int),
- done: make(chan struct{}),
- shutdown: make(chan struct{}),
- }
-
- if b.runningBar != nil {
- b.priority = b.runningBar.priority
- }
-
- go b.serve(ctx, wg, s)
- return b
-}
-
-// RemoveAllPrependers removes all prepend functions.
-func (b *Bar) RemoveAllPrependers() {
- select {
- case b.operateState <- func(s *bState) { s.pDecorators = nil }:
- case <-b.done:
- }
-}
-
-// RemoveAllAppenders removes all append functions.
-func (b *Bar) RemoveAllAppenders() {
- select {
- case b.operateState <- func(s *bState) { s.aDecorators = nil }:
- case <-b.done:
- }
-}
-
-// ProxyReader wraps r with metrics required for progress tracking.
-func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
- if r == nil {
- panic("expect io.Reader, got nil")
- }
- rc, ok := r.(io.ReadCloser)
- if !ok {
- rc = ioutil.NopCloser(r)
- }
- return &proxyReader{rc, b, time.Now()}
-}
-
-// ID returs id of the bar.
-func (b *Bar) ID() int {
- select {
- case b.operateState <- func(s *bState) { b.int64Ch <- int64(s.id) }:
- return int(<-b.int64Ch)
- case <-b.done:
- return b.cacheState.id
- }
-}
-
-// Current returns bar's current number, in other words sum of all increments.
-func (b *Bar) Current() int64 {
- select {
- case b.operateState <- func(s *bState) { b.int64Ch <- s.current }:
- return <-b.int64Ch
- case <-b.done:
- return b.cacheState.current
- }
-}
-
-// SetTotal sets total dynamically.
-// Set complete to true, to trigger bar complete event now.
-func (b *Bar) SetTotal(total int64, complete bool) {
- select {
- case b.operateState <- func(s *bState) {
- s.total = total
- if complete && !s.toComplete {
- s.current = s.total
- s.toComplete = true
- }
- }:
- case <-b.done:
- }
-}
-
-// SetRefill sets refill, if supported by underlying Filler.
-func (b *Bar) SetRefill(amount int64) {
- b.operateState <- func(s *bState) {
- if f, ok := s.filler.(interface{ SetRefill(int64) }); ok {
- f.SetRefill(amount)
- }
- }
-}
-
-// Increment is a shorthand for b.IncrBy(1).
-func (b *Bar) Increment() {
- b.IncrBy(1)
-}
-
-// IncrBy increments progress bar by amount of n.
-// wdd is optional work duration i.e. time.Since(start), which expected
-// to be provided, if any ewma based decorator is used.
-func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
- select {
- case b.operateState <- func(s *bState) {
- s.current += int64(n)
- if s.total > 0 && s.current >= s.total {
- s.current = s.total
- s.toComplete = true
- }
- for _, ar := range s.amountReceivers {
- ar.NextAmount(n, wdd...)
- }
- }:
- case <-b.done:
- }
-}
-
-// Completed reports whether the bar is in completed state.
-func (b *Bar) Completed() bool {
- // omit select here, because primary usage of the method is for loop
- // condition, like for !bar.Completed() {...} so when toComplete=true
- // it is called once (at which time, the bar is still alive), then
- // quits the loop and never suppose to be called afterwards.
- return <-b.boolCh
-}
-
-func (b *Bar) wSyncTable() [][]chan int {
- select {
- case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
- return <-b.syncTableCh
- case <-b.done:
- return b.cacheState.wSyncTable()
- }
-}
-
-func (b *Bar) serve(ctx context.Context, wg *sync.WaitGroup, s *bState) {
- defer wg.Done()
- cancel := ctx.Done()
- for {
- select {
- case op := <-b.operateState:
- op(s)
- case b.boolCh <- s.toComplete:
- case <-cancel:
- s.toComplete = true
- cancel = nil
- case <-b.shutdown:
- b.cacheState = s
- close(b.done)
- for _, sl := range s.shutdownListeners {
- sl.Shutdown()
- }
- return
- }
- }
-}
-
-func (b *Bar) render(debugOut io.Writer, tw int) {
- select {
- case b.operateState <- func(s *bState) {
- defer func() {
- // recovering if user defined decorator panics for example
- if p := recover(); p != nil {
- s.panicMsg = fmt.Sprintf("panic: %v", p)
- fmt.Fprintf(debugOut, "%s %s bar id %02d %v\n", "[mpb]", time.Now(), s.id, s.panicMsg)
- b.frameReaderCh <- &frameReader{
- Reader: strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg)),
- toShutdown: true,
- }
- }
- }()
- r := s.draw(tw)
- var extendedLines int
- if s.newLineExtendFn != nil {
- s.bufNL.Reset()
- s.newLineExtendFn(s.bufNL, newStatistics(s))
- extendedLines = countLines(s.bufNL.Bytes())
- r = io.MultiReader(r, s.bufNL)
- }
- b.frameReaderCh <- &frameReader{
- Reader: r,
- extendedLines: extendedLines,
- toShutdown: s.toComplete && !s.completeFlushed,
- removeOnComplete: s.removeOnComplete,
- }
- s.completeFlushed = s.toComplete
- }:
- case <-b.done:
- s := b.cacheState
- r := s.draw(tw)
- var extendedLines int
- if s.newLineExtendFn != nil {
- s.bufNL.Reset()
- s.newLineExtendFn(s.bufNL, newStatistics(s))
- extendedLines = countLines(s.bufNL.Bytes())
- r = io.MultiReader(r, s.bufNL)
- }
- b.frameReaderCh <- &frameReader{
- Reader: r,
- extendedLines: extendedLines,
- }
- }
-}
-
-func (s *bState) draw(termWidth int) io.Reader {
- if s.panicMsg != "" {
- return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", termWidth), s.panicMsg))
- }
-
- stat := newStatistics(s)
-
- for _, d := range s.pDecorators {
- s.bufP.WriteString(d.Decor(stat))
- }
-
- for _, d := range s.aDecorators {
- s.bufA.WriteString(d.Decor(stat))
- }
-
- if s.barClearOnComplete && s.completeFlushed {
- s.bufA.WriteByte('\n')
- return io.MultiReader(s.bufP, s.bufA)
- }
-
- prependCount := utf8.RuneCount(s.bufP.Bytes())
- appendCount := utf8.RuneCount(s.bufA.Bytes())
-
- if !s.trimSpace {
- // reserve space for edge spaces
- termWidth -= 2
- s.bufB.WriteByte(' ')
- }
-
- if prependCount+s.width+appendCount > termWidth {
- s.filler.Fill(s.bufB, termWidth-prependCount-appendCount, stat)
- } else {
- s.filler.Fill(s.bufB, s.width, stat)
- }
-
- if !s.trimSpace {
- s.bufB.WriteByte(' ')
- }
-
- s.bufA.WriteByte('\n')
- return io.MultiReader(s.bufP, s.bufB, s.bufA)
-}
-
-func (s *bState) wSyncTable() [][]chan int {
- columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators))
- var pCount int
- for _, d := range s.pDecorators {
- if ok, ch := d.Syncable(); ok {
- columns = append(columns, ch)
- pCount++
- }
- }
- var aCount int
- for _, d := range s.aDecorators {
- if ok, ch := d.Syncable(); ok {
- columns = append(columns, ch)
- aCount++
- }
- }
- table := make([][]chan int, 2)
- table[0] = columns[0:pCount]
- table[1] = columns[pCount : pCount+aCount : pCount+aCount]
- return table
-}
-
-func newStatistics(s *bState) *decor.Statistics {
- return &decor.Statistics{
- ID: s.id,
- Completed: s.completeFlushed,
- Total: s.total,
- Current: s.current,
- }
-}
-
-func countLines(b []byte) int {
- return bytes.Count(b, []byte("\n"))
-}
diff --git a/vendor/github.com/vbauerster/mpb/bar_filler.go b/vendor/github.com/vbauerster/mpb/bar_filler.go
deleted file mode 100644
index 4e9285ca5..000000000
--- a/vendor/github.com/vbauerster/mpb/bar_filler.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package mpb
-
-import (
- "io"
- "unicode/utf8"
-
- "github.com/vbauerster/mpb/decor"
- "github.com/vbauerster/mpb/internal"
-)
-
-const (
- rLeft = iota
- rFill
- rTip
- rEmpty
- rRight
- rRevTip
- rRefill
-)
-
-var defaultBarStyle = "[=>-]<+"
-
-type barFiller struct {
- format [][]byte
- refillAmount int64
- reverse bool
-}
-
-func newDefaultBarFiller() Filler {
- bf := &barFiller{
- format: make([][]byte, utf8.RuneCountInString(defaultBarStyle)),
- }
- bf.setStyle(defaultBarStyle)
- return bf
-}
-
-func (s *barFiller) setStyle(style string) {
- if !utf8.ValidString(style) {
- return
- }
- src := make([][]byte, 0, utf8.RuneCountInString(style))
- for _, r := range style {
- src = append(src, []byte(string(r)))
- }
- copy(s.format, src)
-}
-
-func (s *barFiller) setReverse() {
- s.reverse = true
-}
-
-func (s *barFiller) SetRefill(amount int64) {
- s.refillAmount = amount
-}
-
-func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
-
- // don't count rLeft and rRight [brackets]
- width -= 2
- if width < 2 {
- return
- }
-
- w.Write(s.format[rLeft])
- if width == 2 {
- w.Write(s.format[rRight])
- return
- }
-
- bb := make([][]byte, width)
-
- cwidth := int(internal.Percentage(stat.Total, stat.Current, int64(width)))
-
- for i := 0; i < cwidth; i++ {
- bb[i] = s.format[rFill]
- }
-
- if s.refillAmount > 0 {
- var rwidth int
- if s.refillAmount > stat.Current {
- rwidth = cwidth
- } else {
- rwidth = int(internal.Percentage(stat.Total, int64(s.refillAmount), int64(width)))
- }
- for i := 0; i < rwidth; i++ {
- bb[i] = s.format[rRefill]
- }
- }
-
- if cwidth > 0 && cwidth < width {
- bb[cwidth-1] = s.format[rTip]
- }
-
- for i := cwidth; i < width; i++ {
- bb[i] = s.format[rEmpty]
- }
-
- if s.reverse {
- if cwidth > 0 && cwidth < width {
- bb[cwidth-1] = s.format[rRevTip]
- }
- for i := len(bb) - 1; i >= 0; i-- {
- w.Write(bb[i])
- }
- } else {
- for i := 0; i < len(bb); i++ {
- w.Write(bb[i])
- }
- }
- w.Write(s.format[rRight])
-}
diff --git a/vendor/github.com/vbauerster/mpb/bar_option.go b/vendor/github.com/vbauerster/mpb/bar_option.go
deleted file mode 100644
index e9a4bd2a7..000000000
--- a/vendor/github.com/vbauerster/mpb/bar_option.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package mpb
-
-import (
- "io"
-
- "github.com/vbauerster/mpb/decor"
-)
-
-// BarOption is a function option which changes the default behavior of a bar.
-type BarOption func(*bState)
-
-// AppendDecorators let you inject decorators to the bar's right side.
-func AppendDecorators(appenders ...decor.Decorator) BarOption {
- return func(s *bState) {
- for _, decorator := range appenders {
- if ar, ok := decorator.(decor.AmountReceiver); ok {
- s.amountReceivers = append(s.amountReceivers, ar)
- }
- if sl, ok := decorator.(decor.ShutdownListener); ok {
- s.shutdownListeners = append(s.shutdownListeners, sl)
- }
- s.aDecorators = append(s.aDecorators, decorator)
- }
- }
-}
-
-// PrependDecorators let you inject decorators to the bar's left side.
-func PrependDecorators(prependers ...decor.Decorator) BarOption {
- return func(s *bState) {
- for _, decorator := range prependers {
- if ar, ok := decorator.(decor.AmountReceiver); ok {
- s.amountReceivers = append(s.amountReceivers, ar)
- }
- if sl, ok := decorator.(decor.ShutdownListener); ok {
- s.shutdownListeners = append(s.shutdownListeners, sl)
- }
- s.pDecorators = append(s.pDecorators, decorator)
- }
- }
-}
-
-// BarID sets bar id.
-func BarID(id int) BarOption {
- return func(s *bState) {
- s.id = id
- }
-}
-
-// BarWidth sets bar width independent of the container.
-func BarWidth(width int) BarOption {
- return func(s *bState) {
- s.width = width
- }
-}
-
-// BarRemoveOnComplete is a flag, if set whole bar line will be removed
-// on complete event. If both BarRemoveOnComplete and BarClearOnComplete
-// are set, first bar section gets cleared and then whole bar line
-// gets removed completely.
-func BarRemoveOnComplete() BarOption {
- return func(s *bState) {
- s.removeOnComplete = true
- }
-}
-
-// BarReplaceOnComplete is indicator for delayed bar start, after the
-// `runningBar` is complete. To achieve bar replacement effect,
-// `runningBar` should has its `BarRemoveOnComplete` option set.
-func BarReplaceOnComplete(runningBar *Bar) BarOption {
- return BarParkTo(runningBar)
-}
-
-// BarParkTo same as BarReplaceOnComplete
-func BarParkTo(runningBar *Bar) BarOption {
- return func(s *bState) {
- s.runningBar = runningBar
- }
-}
-
-// BarClearOnComplete is a flag; if set, the bar section will be cleared on
-// the complete event. If you need to remove a whole bar line, refer to
-// BarRemoveOnComplete.
-func BarClearOnComplete() BarOption {
- return func(s *bState) {
- s.barClearOnComplete = true
- }
-}
-
-// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
-// will be on top. If `BarReplaceOnComplete` option is supplied, this
-// option is ignored.
-func BarPriority(priority int) BarOption {
- return func(s *bState) {
- s.priority = priority
- }
-}
-
-// BarNewLineExtend takes a user defined efn, which gets called each
-// render cycle. Anything efn writes to the provided writer will appear on
-// a new line of the respective bar.
-func BarNewLineExtend(efn func(io.Writer, *decor.Statistics)) BarOption {
- return func(s *bState) {
- s.newLineExtendFn = efn
- }
-}
-
-// TrimSpace trims bar's edge spaces.
-func TrimSpace() BarOption {
- return func(s *bState) {
- s.trimSpace = true
- }
-}
-
-// BarStyle sets custom bar style, default one is "[=>-]<+".
-//
-// '[' left bracket rune
-//
-// '=' fill rune
-//
-// '>' tip rune
-//
-// '-' empty rune
-//
-// ']' right bracket rune
-//
-// '<' reverse tip rune, used when BarReverse option is set
-//
-// '+' refill rune, used when *Bar.SetRefill(int64) is called
-//
-// It's ok to provide first five runes only, for example mpb.BarStyle("╢▌▌░╟")
-func BarStyle(style string) BarOption {
- chk := func(filler Filler) (interface{}, bool) {
- if style == "" {
- return nil, false
- }
- t, ok := filler.(*barFiller)
- return t, ok
- }
- cb := func(t interface{}) {
- t.(*barFiller).setStyle(style)
- }
- return MakeFillerTypeSpecificBarOption(chk, cb)
-}
-
-// BarReverse sets reverse mode: the bar will progress from right to left.
-func BarReverse() BarOption {
- chk := func(filler Filler) (interface{}, bool) {
- t, ok := filler.(*barFiller)
- return t, ok
- }
- cb := func(t interface{}) {
- t.(*barFiller).setReverse()
- }
- return MakeFillerTypeSpecificBarOption(chk, cb)
-}
-
-// SpinnerStyle sets custom spinner style.
-// Effective when Filler type is spinner.
-func SpinnerStyle(frames []string) BarOption {
- chk := func(filler Filler) (interface{}, bool) {
- if len(frames) == 0 {
- return nil, false
- }
- t, ok := filler.(*spinnerFiller)
- return t, ok
- }
- cb := func(t interface{}) {
- t.(*spinnerFiller).frames = frames
- }
- return MakeFillerTypeSpecificBarOption(chk, cb)
-}
-
-// MakeFillerTypeSpecificBarOption makes a BarOption specific to the Filler's
-// actual type. If you implement your own Filler, you will most probably
-// need this. See BarStyle or SpinnerStyle for an example.
-func MakeFillerTypeSpecificBarOption(
- typeChecker func(Filler) (interface{}, bool),
- cb func(interface{}),
-) BarOption {
- return func(s *bState) {
- if t, ok := typeChecker(s.filler); ok {
- cb(t)
- }
- }
-}
-
-// OptionOnCondition returns option when condition evaluates to true.
-func OptionOnCondition(option BarOption, condition func() bool) BarOption {
- if condition() {
- return option
- }
- return nil
-}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go
deleted file mode 100644
index 05e31c480..000000000
--- a/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !windows
-
-package cwriter
-
-import (
- "io"
- "strings"
-)
-
-func (w *Writer) clearLines() error {
- _, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount))
- return err
-}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
deleted file mode 100644
index 747a63484..000000000
--- a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// +build windows
-
-package cwriter
-
-import (
- "io"
- "strings"
- "syscall"
- "unsafe"
-
- isatty "github.com/mattn/go-isatty"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
- procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
-)
-
-type (
- short int16
- word uint16
- dword uint32
-
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
-)
-
-// FdWriter is a writer with a file descriptor.
-type FdWriter interface {
- io.Writer
- Fd() uintptr
-}
-
-func (w *Writer) clearLines() error {
- f, ok := w.out.(FdWriter)
- if ok && !isatty.IsTerminal(f.Fd()) {
- _, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount))
- return err
- }
- fd := f.Fd()
- var info consoleScreenBufferInfo
- procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info)))
-
- for i := 0; i < w.lineCount; i++ {
- // move the cursor up
- info.cursorPosition.y--
- procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition))))
- // clear the line
- cursor := coord{
- x: info.window.left,
- y: info.window.top + info.cursorPosition.y,
- }
- var count, w dword
- count = dword(info.size.x)
- procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w)))
- }
- return nil
-}
diff --git a/vendor/github.com/vbauerster/mpb/decor/counters.go b/vendor/github.com/vbauerster/mpb/decor/counters.go
deleted file mode 100644
index 7d581eefb..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/counters.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package decor
-
-import (
- "fmt"
- "io"
- "strconv"
- "strings"
-)
-
-const (
- _ = iota
- KiB = 1 << (iota * 10)
- MiB
- GiB
- TiB
-)
-
-const (
- KB = 1000
- MB = KB * 1000
- GB = MB * 1000
- TB = GB * 1000
-)
-
-const (
- _ = iota
- UnitKiB
- UnitKB
-)
-
-type CounterKiB int64
-
-func (c CounterKiB) Format(st fmt.State, verb rune) {
- prec, ok := st.Precision()
-
- if verb == 'd' || !ok {
- prec = 0
- }
- if verb == 'f' && !ok {
- prec = 6
- }
-	// retain old behavior if the 's' verb is used
- if verb == 's' {
- prec = 1
- }
-
- var res, unit string
- switch {
- case c >= TiB:
- unit = "TiB"
- res = strconv.FormatFloat(float64(c)/TiB, 'f', prec, 64)
- case c >= GiB:
- unit = "GiB"
- res = strconv.FormatFloat(float64(c)/GiB, 'f', prec, 64)
- case c >= MiB:
- unit = "MiB"
- res = strconv.FormatFloat(float64(c)/MiB, 'f', prec, 64)
- case c >= KiB:
- unit = "KiB"
- res = strconv.FormatFloat(float64(c)/KiB, 'f', prec, 64)
- default:
- unit = "b"
- res = strconv.FormatInt(int64(c), 10)
- }
-
- if st.Flag(' ') {
- res += " "
- }
- res += unit
-
- if w, ok := st.Width(); ok {
- if len(res) < w {
- pad := strings.Repeat(" ", w-len(res))
- if st.Flag(int('-')) {
- res += pad
- } else {
- res = pad + res
- }
- }
- }
-
- io.WriteString(st, res)
-}
-
-type CounterKB int64
-
-func (c CounterKB) Format(st fmt.State, verb rune) {
- prec, ok := st.Precision()
-
- if verb == 'd' || !ok {
- prec = 0
- }
- if verb == 'f' && !ok {
- prec = 6
- }
-	// retain old behavior if the 's' verb is used
- if verb == 's' {
- prec = 1
- }
-
- var res, unit string
- switch {
- case c >= TB:
- unit = "TB"
- res = strconv.FormatFloat(float64(c)/TB, 'f', prec, 64)
- case c >= GB:
- unit = "GB"
- res = strconv.FormatFloat(float64(c)/GB, 'f', prec, 64)
- case c >= MB:
- unit = "MB"
- res = strconv.FormatFloat(float64(c)/MB, 'f', prec, 64)
- case c >= KB:
- unit = "kB"
- res = strconv.FormatFloat(float64(c)/KB, 'f', prec, 64)
- default:
- unit = "b"
- res = strconv.FormatInt(int64(c), 10)
- }
-
- if st.Flag(' ') {
- res += " "
- }
- res += unit
-
- if w, ok := st.Width(); ok {
- if len(res) < w {
- pad := strings.Repeat(" ", w-len(res))
- if st.Flag(int('-')) {
- res += pad
- } else {
- res = pad + res
- }
- }
- }
-
- io.WriteString(st, res)
-}
-
-// CountersNoUnit is a wrapper around Counters with no unit param.
-func CountersNoUnit(pairFormat string, wcc ...WC) Decorator {
- return Counters(0, pairFormat, wcc...)
-}
-
-// CountersKibiByte is a wrapper around Counters with predefined unit
-// UnitKiB (bytes/1024).
-func CountersKibiByte(pairFormat string, wcc ...WC) Decorator {
- return Counters(UnitKiB, pairFormat, wcc...)
-}
-
-// CountersKiloByte is a wrapper around Counters with predefined unit
-// UnitKB (bytes/1000).
-func CountersKiloByte(pairFormat string, wcc ...WC) Decorator {
- return Counters(UnitKB, pairFormat, wcc...)
-}
-
-// Counters decorator with dynamic unit measure adjustment.
-//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
-//
-// `pairFormat` printf compatible verbs for current and total, like "%f" or "%d"
-//
-// `wcc` optional WC config
-//
-// pairFormat example if UnitKB is chosen:
-//
-// "%.1f / %.1f" = "1.0MB / 12.0MB" or "% .1f / % .1f" = "1.0 MB / 12.0 MB"
-func Counters(unit int, pairFormat string, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &countersDecorator{
- WC: wc,
- unit: unit,
- pairFormat: pairFormat,
- }
- return d
-}
-
-type countersDecorator struct {
- WC
- unit int
- pairFormat string
- completeMsg *string
-}
-
-func (d *countersDecorator) Decor(st *Statistics) string {
- if st.Completed && d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
-
- var str string
- switch d.unit {
- case UnitKiB:
- str = fmt.Sprintf(d.pairFormat, CounterKiB(st.Current), CounterKiB(st.Total))
- case UnitKB:
- str = fmt.Sprintf(d.pairFormat, CounterKB(st.Current), CounterKB(st.Total))
- default:
- str = fmt.Sprintf(d.pairFormat, st.Current, st.Total)
- }
-
- return d.FormatMsg(str)
-}
-
-func (d *countersDecorator) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
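The counter decorators above format the current/total pair with a printf-style pair format; the ' ' flag inserts a space between the number and the unit, as documented in the Format methods. A minimal sketch against the same pre-bump v3-style API (the 32 MiB total and chunk size are made up for illustration):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	total := int64(32 * 1024 * 1024) // pretend we transfer 32 MiB
	p := mpb.New()
	bar := p.AddBar(total,
		// renders e.g. "12.0 MiB / 32.0 MiB"
		mpb.AppendDecorators(decor.CountersKibiByte("% .1f / % .1f")),
	)
	for n := int64(0); n < total; n += 1 << 20 {
		time.Sleep(10 * time.Millisecond)
		bar.IncrBy(1 << 20) // report one MiB chunk
	}
	p.Wait()
}
```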
diff --git a/vendor/github.com/vbauerster/mpb/decor/decorator.go b/vendor/github.com/vbauerster/mpb/decor/decorator.go
deleted file mode 100644
index 2fe40aea6..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/decorator.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package decor
-
-import (
- "fmt"
- "time"
- "unicode/utf8"
-)
-
-const (
-	// DidentRight bit specifies indentation direction.
- // |foo |b | With DidentRight
- // | foo| b| Without DidentRight
- DidentRight = 1 << iota
-
- // DextraSpace bit adds extra space, makes sense with DSyncWidth only.
- // When DidentRight bit set, the space will be added to the right,
- // otherwise to the left.
- DextraSpace
-
- // DSyncWidth bit enables same column width synchronization.
- // Effective with multiple bars only.
- DSyncWidth
-
- // DSyncWidthR is shortcut for DSyncWidth|DidentRight
- DSyncWidthR = DSyncWidth | DidentRight
-
- // DSyncSpace is shortcut for DSyncWidth|DextraSpace
- DSyncSpace = DSyncWidth | DextraSpace
-
- // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
- DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
-)
-
-// TimeStyle enum.
-type TimeStyle int
-
-// TimeStyle kinds.
-const (
- ET_STYLE_GO TimeStyle = iota
- ET_STYLE_HHMMSS
- ET_STYLE_HHMM
- ET_STYLE_MMSS
-)
-
-// Statistics is a struct, which gets passed to a Decorator.
-type Statistics struct {
- ID int
- Completed bool
- Total int64
- Current int64
-}
-
-// Decorator interface.
-// A decorator must implement this interface in order to be used with
-// the mpb library.
-type Decorator interface {
- Decor(*Statistics) string
- Syncable
-}
-
-// Syncable interface.
-// All decorators implement this interface implicitly. Its Syncable
-// method exposes width sync channel, if sync is enabled.
-type Syncable interface {
- Syncable() (bool, chan int)
-}
-
-// OnCompleteMessenger interface.
-// Decorators implementing this interface are supposed to return the
-// provided string on the complete event.
-type OnCompleteMessenger interface {
- OnCompleteMessage(string)
-}
-
-// AmountReceiver interface.
-// If a decorator needs to receive the increment amount, this is the
-// right interface to implement.
-type AmountReceiver interface {
- NextAmount(int, ...time.Duration)
-}
-
-// ShutdownListener interface.
-// If a decorator needs to be notified once upon the bar shutdown event,
-// this is the right interface to implement.
-type ShutdownListener interface {
- Shutdown()
-}
-
-// Global convenience shortcuts
-var (
- WCSyncWidth = WC{C: DSyncWidth}
- WCSyncWidthR = WC{C: DSyncWidthR}
- WCSyncSpace = WC{C: DSyncSpace}
- WCSyncSpaceR = WC{C: DSyncSpaceR}
-)
-
-// WC is a struct with two public fields W and C, both of int type.
-// W represents width and C represents bit set of width related config.
-// A decorator should embed WC, in order to become Syncable.
-type WC struct {
- W int
- C int
- format string
- wsync chan int
-}
-
-// FormatMsg formats final message according to WC.W and WC.C.
-// Should be called by any Decorator implementation.
-func (wc WC) FormatMsg(msg string) string {
- if (wc.C & DSyncWidth) != 0 {
- wc.wsync <- utf8.RuneCountInString(msg)
- max := <-wc.wsync
- if max == 0 {
- max = wc.W
- }
- if (wc.C & DextraSpace) != 0 {
- max++
- }
- return fmt.Sprintf(fmt.Sprintf(wc.format, max), msg)
- }
- return fmt.Sprintf(fmt.Sprintf(wc.format, wc.W), msg)
-}
-
-// Init initializes width related config.
-func (wc *WC) Init() {
- wc.format = "%%"
- if (wc.C & DidentRight) != 0 {
- wc.format += "-"
- }
- wc.format += "%ds"
- if (wc.C & DSyncWidth) != 0 {
- wc.wsync = make(chan int)
- }
-}
-
-// Syncable is implementation of Syncable interface.
-func (wc *WC) Syncable() (bool, chan int) {
- return (wc.C & DSyncWidth) != 0, wc.wsync
-}
-
-// OnComplete returns decorator, which wraps provided decorator, with
-// sole purpose to display provided message on complete event.
-//
-// `decorator` Decorator to wrap
-//
-// `message` message to display on complete event
-func OnComplete(decorator Decorator, message string) Decorator {
- if d, ok := decorator.(OnCompleteMessenger); ok {
- d.OnCompleteMessage(message)
- }
- return decorator
-}
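The Decorator/WC machinery above is what custom decorators plug into: embed WC for width synchronization, call Init once, and render through FormatMsg. The following items-count decorator is a hypothetical sketch of mine (the type name and format are not part of the library), again written against the v3-style API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

// itemsDecorator renders "current/total". Embedding decor.WC makes it
// Syncable and provides FormatMsg.
type itemsDecorator struct {
	decor.WC
	completeMsg *string
}

func newItemsDecorator(wc decor.WC) decor.Decorator {
	wc.Init() // required before FormatMsg may be used
	return &itemsDecorator{WC: wc}
}

func (d *itemsDecorator) Decor(st *decor.Statistics) string {
	if st.Completed && d.completeMsg != nil {
		return d.FormatMsg(*d.completeMsg)
	}
	return d.FormatMsg(fmt.Sprintf("%d/%d", st.Current, st.Total))
}

// OnCompleteMessage makes the decorator usable with decor.OnComplete.
func (d *itemsDecorator) OnCompleteMessage(msg string) {
	d.completeMsg = &msg
}

func main() {
	p := mpb.New()
	bar := p.AddBar(42, mpb.AppendDecorators(
		decor.OnComplete(newItemsDecorator(decor.WCSyncWidth), "all done"),
	))
	for i := 0; i < 42; i++ {
		time.Sleep(5 * time.Millisecond)
		bar.IncrBy(1)
	}
	p.Wait()
}
```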
diff --git a/vendor/github.com/vbauerster/mpb/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/decor/elapsed.go
deleted file mode 100644
index b2e75852c..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/elapsed.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package decor
-
-import (
- "fmt"
- "time"
-)
-
-// Elapsed returns elapsed time decorator.
-//
-// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
-//
-// `wcc` optional WC config
-func Elapsed(style TimeStyle, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &elapsedDecorator{
- WC: wc,
- style: style,
- startTime: time.Now(),
- }
- return d
-}
-
-type elapsedDecorator struct {
- WC
- style TimeStyle
- startTime time.Time
- msg string
- completeMsg *string
-}
-
-func (d *elapsedDecorator) Decor(st *Statistics) string {
- if st.Completed {
- if d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
- return d.FormatMsg(d.msg)
- }
-
- timeElapsed := time.Since(d.startTime)
- hours := int64((timeElapsed / time.Hour) % 60)
- minutes := int64((timeElapsed / time.Minute) % 60)
- seconds := int64((timeElapsed / time.Second) % 60)
-
- switch d.style {
- case ET_STYLE_GO:
- d.msg = fmt.Sprint(time.Duration(timeElapsed.Seconds()) * time.Second)
- case ET_STYLE_HHMMSS:
- d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- case ET_STYLE_HHMM:
- d.msg = fmt.Sprintf("%02d:%02d", hours, minutes)
- case ET_STYLE_MMSS:
- if hours > 0 {
- d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- } else {
- d.msg = fmt.Sprintf("%02d:%02d", minutes, seconds)
- }
- }
-
- return d.FormatMsg(d.msg)
-}
-
-func (d *elapsedDecorator) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
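Elapsed simply formats time.Since(startTime) in the chosen TimeStyle. A short sketch combining it with OnComplete so the timer is replaced once the bar finishes (v3-style API, illustrative values):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(60,
		mpb.AppendDecorators(
			decor.Percentage(decor.WCSyncSpace),
			// "mm:ss" style; replaced by "done" once the bar completes
			decor.OnComplete(decor.Elapsed(decor.ET_STYLE_MMSS, decor.WCSyncSpace), "done"),
		),
	)
	for i := 0; i < 60; i++ {
		time.Sleep(10 * time.Millisecond)
		bar.IncrBy(1)
	}
	p.Wait()
}
```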
diff --git a/vendor/github.com/vbauerster/mpb/decor/eta.go b/vendor/github.com/vbauerster/mpb/decor/eta.go
deleted file mode 100644
index e8dc979b4..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/eta.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package decor
-
-import (
- "fmt"
- "math"
- "time"
-
- "github.com/VividCortex/ewma"
-)
-
-type TimeNormalizer func(time.Duration) time.Duration
-
-// EwmaETA exponential-weighted-moving-average based ETA decorator.
-//
-// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
-//
-// `age` is the previous N samples to average over.
-//
-// `wcc` optional WC config
-func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
- return MovingAverageETA(style, ewma.NewMovingAverage(age), NopNormalizer(), wcc...)
-}
-
-// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average.
-//
-// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
-//
-// `average` available implementations of MovingAverage [ewma.MovingAverage|NewMedian|NewMedianEwma]
-//
-// `normalizer` available implementations are [NopNormalizer|FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
-//
-// `wcc` optional WC config
-func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &movingAverageETA{
- WC: wc,
- style: style,
- average: average,
- normalizer: normalizer,
- }
- return d
-}
-
-type movingAverageETA struct {
- WC
- style TimeStyle
- average ewma.MovingAverage
- completeMsg *string
- normalizer TimeNormalizer
-}
-
-func (d *movingAverageETA) Decor(st *Statistics) string {
- if st.Completed && d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
-
- v := math.Round(d.average.Value())
- remaining := d.normalizer(time.Duration((st.Total - st.Current) * int64(v)))
- hours := int64((remaining / time.Hour) % 60)
- minutes := int64((remaining / time.Minute) % 60)
- seconds := int64((remaining / time.Second) % 60)
-
- var str string
- switch d.style {
- case ET_STYLE_GO:
- str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second)
- case ET_STYLE_HHMMSS:
- str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- case ET_STYLE_HHMM:
- str = fmt.Sprintf("%02d:%02d", hours, minutes)
- case ET_STYLE_MMSS:
- if hours > 0 {
- str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- } else {
- str = fmt.Sprintf("%02d:%02d", minutes, seconds)
- }
- }
-
- return d.FormatMsg(str)
-}
-
-func (d *movingAverageETA) NextAmount(n int, wdd ...time.Duration) {
- var workDuration time.Duration
- for _, wd := range wdd {
- workDuration = wd
- }
- lastItemEstimate := float64(workDuration) / float64(n)
- if math.IsInf(lastItemEstimate, 0) || math.IsNaN(lastItemEstimate) {
- return
- }
- d.average.Add(lastItemEstimate)
-}
-
-func (d *movingAverageETA) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
-
-// AverageETA decorator.
-//
-// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
-//
-// `wcc` optional WC config
-func AverageETA(style TimeStyle, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &averageETA{
- WC: wc,
- style: style,
- startTime: time.Now(),
- }
- return d
-}
-
-type averageETA struct {
- WC
- style TimeStyle
- startTime time.Time
- completeMsg *string
-}
-
-func (d *averageETA) Decor(st *Statistics) string {
- if st.Completed && d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
-
- var str string
- timeElapsed := time.Since(d.startTime)
- v := math.Round(float64(timeElapsed) / float64(st.Current))
- if math.IsInf(v, 0) || math.IsNaN(v) {
- v = 0
- }
- remaining := time.Duration((st.Total - st.Current) * int64(v))
- hours := int64((remaining / time.Hour) % 60)
- minutes := int64((remaining / time.Minute) % 60)
- seconds := int64((remaining / time.Second) % 60)
-
- switch d.style {
- case ET_STYLE_GO:
- str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second)
- case ET_STYLE_HHMMSS:
- str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- case ET_STYLE_HHMM:
- str = fmt.Sprintf("%02d:%02d", hours, minutes)
- case ET_STYLE_MMSS:
- if hours > 0 {
- str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- } else {
- str = fmt.Sprintf("%02d:%02d", minutes, seconds)
- }
- }
-
- return d.FormatMsg(str)
-}
-
-func (d *averageETA) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
-
-func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer {
- var normalized time.Duration
- var lastCall time.Time
- return func(remaining time.Duration) time.Duration {
- if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < maxTolerate/2 {
- normalized = remaining
- lastCall = time.Now()
- return remaining
- }
- normalized -= time.Since(lastCall)
- lastCall = time.Now()
- return normalized
- }
-}
-
-func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer {
- var normalized time.Duration
- var lastCall time.Time
- var count int
- return func(remaining time.Duration) time.Duration {
- if count == 0 || remaining <= time.Duration(15*time.Second) {
- count = updInterval
- normalized = remaining
- lastCall = time.Now()
- return remaining
- }
- count--
- normalized -= time.Since(lastCall)
- lastCall = time.Now()
- if normalized > 0 {
- return normalized
- }
- return remaining
- }
-}
-
-func NopNormalizer() TimeNormalizer {
- return func(remaining time.Duration) time.Duration {
- return remaining
- }
-}
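The ewma based ETA only produces estimates if each increment is reported together with the time it took, which is what the NextAmount hook above consumes. A sketch with simulated per-item work durations (v3-style API; the sample age of 60 and the sleep values are arbitrary):

```go
package main

import (
	"math/rand"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	p := mpb.New()
	total := 100
	bar := p.AddBar(int64(total),
		mpb.AppendDecorators(
			// average over roughly the last 60 samples
			decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth),
		),
	)
	for i := 0; i < total; i++ {
		start := time.Now()
		time.Sleep(time.Duration(rand.Intn(10)+1) * 10 * time.Millisecond)
		// the measured duration feeds the moving average via NextAmount
		bar.IncrBy(1, time.Since(start))
	}
	p.Wait()
}
```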
diff --git a/vendor/github.com/vbauerster/mpb/decor/name.go b/vendor/github.com/vbauerster/mpb/decor/name.go
deleted file mode 100644
index a5a5d1469..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/name.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package decor
-
-// StaticName returns name decorator.
-//
-// `name` string to display
-//
-// `wcc` optional WC config
-func StaticName(name string, wcc ...WC) Decorator {
- return Name(name, wcc...)
-}
-
-// Name returns name decorator.
-//
-// `name` string to display
-//
-// `wcc` optional WC config
-func Name(name string, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &nameDecorator{
- WC: wc,
- msg: name,
- }
- return d
-}
-
-type nameDecorator struct {
- WC
- msg string
- complete *string
-}
-
-func (d *nameDecorator) Decor(st *Statistics) string {
- if st.Completed && d.complete != nil {
- return d.FormatMsg(*d.complete)
- }
- return d.FormatMsg(d.msg)
-}
-
-func (d *nameDecorator) OnCompleteMessage(msg string) {
- d.complete = &msg
-}
diff --git a/vendor/github.com/vbauerster/mpb/decor/percentage.go b/vendor/github.com/vbauerster/mpb/decor/percentage.go
deleted file mode 100644
index 078fbcf89..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/percentage.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package decor
-
-import (
- "fmt"
-
- "github.com/vbauerster/mpb/internal"
-)
-
-// Percentage returns percentage decorator.
-//
-// `wcc` optional WC config
-func Percentage(wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &percentageDecorator{
- WC: wc,
- }
- return d
-}
-
-type percentageDecorator struct {
- WC
- completeMsg *string
-}
-
-func (d *percentageDecorator) Decor(st *Statistics) string {
- if st.Completed && d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
- str := fmt.Sprintf("%d %%", internal.Percentage(st.Total, st.Current, 100))
- return d.FormatMsg(str)
-}
-
-func (d *percentageDecorator) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
diff --git a/vendor/github.com/vbauerster/mpb/decor/speed.go b/vendor/github.com/vbauerster/mpb/decor/speed.go
deleted file mode 100644
index 74658ce41..000000000
--- a/vendor/github.com/vbauerster/mpb/decor/speed.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package decor
-
-import (
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
- "time"
-
- "github.com/VividCortex/ewma"
-)
-
-type SpeedKiB float64
-
-func (s SpeedKiB) Format(st fmt.State, verb rune) {
- prec, ok := st.Precision()
-
- if verb == 'd' || !ok {
- prec = 0
- }
- if verb == 'f' && !ok {
- prec = 6
- }
-	// retain old behavior if the 's' verb is used
- if verb == 's' {
- prec = 1
- }
-
- var res, unit string
- switch {
- case s >= TiB:
- unit = "TiB/s"
- res = strconv.FormatFloat(float64(s)/TiB, 'f', prec, 64)
- case s >= GiB:
- unit = "GiB/s"
- res = strconv.FormatFloat(float64(s)/GiB, 'f', prec, 64)
- case s >= MiB:
- unit = "MiB/s"
- res = strconv.FormatFloat(float64(s)/MiB, 'f', prec, 64)
- case s >= KiB:
- unit = "KiB/s"
- res = strconv.FormatFloat(float64(s)/KiB, 'f', prec, 64)
- default:
- unit = "b/s"
- res = strconv.FormatInt(int64(s), 10)
- }
-
- if st.Flag(' ') {
- res += " "
- }
- res += unit
-
- if w, ok := st.Width(); ok {
- if len(res) < w {
- pad := strings.Repeat(" ", w-len(res))
- if st.Flag(int('-')) {
- res += pad
- } else {
- res = pad + res
- }
- }
- }
-
- io.WriteString(st, res)
-}
-
-type SpeedKB float64
-
-func (s SpeedKB) Format(st fmt.State, verb rune) {
- prec, ok := st.Precision()
-
- if verb == 'd' || !ok {
- prec = 0
- }
- if verb == 'f' && !ok {
- prec = 6
- }
-	// retain old behavior if the 's' verb is used
- if verb == 's' {
- prec = 1
- }
-
- var res, unit string
- switch {
- case s >= TB:
- unit = "TB/s"
- res = strconv.FormatFloat(float64(s)/TB, 'f', prec, 64)
- case s >= GB:
- unit = "GB/s"
- res = strconv.FormatFloat(float64(s)/GB, 'f', prec, 64)
- case s >= MB:
- unit = "MB/s"
- res = strconv.FormatFloat(float64(s)/MB, 'f', prec, 64)
- case s >= KB:
- unit = "kB/s"
- res = strconv.FormatFloat(float64(s)/KB, 'f', prec, 64)
- default:
- unit = "b/s"
- res = strconv.FormatInt(int64(s), 10)
- }
-
- if st.Flag(' ') {
- res += " "
- }
- res += unit
-
- if w, ok := st.Width(); ok {
- if len(res) < w {
- pad := strings.Repeat(" ", w-len(res))
- if st.Flag(int('-')) {
- res += pad
- } else {
- res = pad + res
- }
- }
- }
-
- io.WriteString(st, res)
-}
-
-// EwmaSpeed exponential-weighted-moving-average based speed decorator,
-// with dynamic unit measure adjustment.
-//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
-//
-// `unitFormat` printf compatible verb for value, like "%f" or "%d"
-//
-// `average` MovingAverage implementation
-//
-// `wcc` optional WC config
-//
-// unitFormat example if UnitKiB is chosen:
-//
-// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
-func EwmaSpeed(unit int, unitFormat string, age float64, wcc ...WC) Decorator {
- return MovingAverageSpeed(unit, unitFormat, ewma.NewMovingAverage(age), wcc...)
-}
-
-// MovingAverageSpeed decorator relies on MovingAverage implementation
-// to calculate its average.
-//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
-//
-// `unitFormat` printf compatible verb for value, like "%f" or "%d"
-//
-// `average` MovingAverage implementation
-//
-// `wcc` optional WC config
-func MovingAverageSpeed(unit int, unitFormat string, average MovingAverage, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &movingAverageSpeed{
- WC: wc,
- unit: unit,
- unitFormat: unitFormat,
- average: average,
- }
- return d
-}
-
-type movingAverageSpeed struct {
- WC
- unit int
- unitFormat string
- average ewma.MovingAverage
- msg string
- completeMsg *string
-}
-
-func (d *movingAverageSpeed) Decor(st *Statistics) string {
- if st.Completed {
- if d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
- return d.FormatMsg(d.msg)
- }
-
- speed := d.average.Value()
- switch d.unit {
- case UnitKiB:
- d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
- case UnitKB:
- d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
- default:
- d.msg = fmt.Sprintf(d.unitFormat, speed)
- }
-
- return d.FormatMsg(d.msg)
-}
-
-func (s *movingAverageSpeed) NextAmount(n int, wdd ...time.Duration) {
- var workDuration time.Duration
- for _, wd := range wdd {
- workDuration = wd
- }
- speed := float64(n) / workDuration.Seconds() / 1000
- if math.IsInf(speed, 0) || math.IsNaN(speed) {
- return
- }
- s.average.Add(speed)
-}
-
-func (d *movingAverageSpeed) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
-
-// AverageSpeed decorator with dynamic unit measure adjustment.
-//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
-//
-// `unitFormat` printf compatible verb for value, like "%f" or "%d"
-//
-// `wcc` optional WC config
-//
-// unitFormat example if UnitKiB is chosen:
-//
-// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
-func AverageSpeed(unit int, unitFormat string, wcc ...WC) Decorator {
- var wc WC
- for _, widthConf := range wcc {
- wc = widthConf
- }
- wc.Init()
- d := &averageSpeed{
- WC: wc,
- unit: unit,
- unitFormat: unitFormat,
- startTime: time.Now(),
- }
- return d
-}
-
-type averageSpeed struct {
- WC
- unit int
- unitFormat string
- startTime time.Time
- msg string
- completeMsg *string
-}
-
-func (d *averageSpeed) Decor(st *Statistics) string {
- if st.Completed {
- if d.completeMsg != nil {
- return d.FormatMsg(*d.completeMsg)
- }
- return d.FormatMsg(d.msg)
- }
-
- timeElapsed := time.Since(d.startTime)
- speed := float64(st.Current) / timeElapsed.Seconds()
-
- switch d.unit {
- case UnitKiB:
- d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
- case UnitKB:
- d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
- default:
- d.msg = fmt.Sprintf(d.unitFormat, speed)
- }
-
- return d.FormatMsg(d.msg)
-}
-
-func (d *averageSpeed) OnCompleteMessage(msg string) {
- d.completeMsg = &msg
-}
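AverageSpeed above derives bytes per second from the time elapsed since the decorator was created, so plain IncrBy calls are enough, unlike the ewma variant which needs per-chunk durations. A sketch (v3-style API; the 8 MiB total and 256 KiB chunks are illustrative):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	total := int64(8 * 1024 * 1024)
	p := mpb.New()
	bar := p.AddBar(total,
		mpb.AppendDecorators(
			// renders e.g. "1.5 MiB/s"; UnitKiB selects the 1024-based units
			decor.AverageSpeed(decor.UnitKiB, "% .1f"),
		),
	)
	for n := int64(0); n < total; n += 256 << 10 {
		time.Sleep(20 * time.Millisecond)
		bar.IncrBy(256 << 10)
	}
	p.Wait()
}
```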
diff --git a/vendor/github.com/vbauerster/mpb/doc.go b/vendor/github.com/vbauerster/mpb/doc.go
deleted file mode 100644
index 16245956a..000000000
--- a/vendor/github.com/vbauerster/mpb/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (C) 2016-2018 Vladimir Bauer
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mpb is a library for rendering progress bars in terminal applications.
-package mpb
diff --git a/vendor/github.com/vbauerster/mpb/go.test.sh b/vendor/github.com/vbauerster/mpb/go.test.sh
deleted file mode 100644
index 34dbbfb31..000000000
--- a/vendor/github.com/vbauerster/mpb/go.test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
- go test -race -coverprofile=profile.out -covermode=atomic $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/vbauerster/mpb/internal/percentage.go b/vendor/github.com/vbauerster/mpb/internal/percentage.go
deleted file mode 100644
index 0483d2598..000000000
--- a/vendor/github.com/vbauerster/mpb/internal/percentage.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package internal
-
-import "math"
-
-// Percentage is a helper which scales current/total to the given width; pass width=100 for a percentage.
-func Percentage(total, current, width int64) int64 {
- if total <= 0 {
- return 0
- }
- p := float64(width*current) / float64(total)
- return int64(math.Round(p))
-}
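This helper is the only arithmetic behind both the percentage decorator (width=100) and the bar fill (width = number of cells). A small standalone sketch that mirrors the same formula with concrete numbers:

```go
package main

import (
	"fmt"
	"math"
)

// percentage mirrors internal.Percentage above: round(width * current / total).
func percentage(total, current, width int64) int64 {
	if total <= 0 {
		return 0
	}
	return int64(math.Round(float64(width*current) / float64(total)))
}

func main() {
	// 256 KiB of a 1 MiB transfer:
	fmt.Println(percentage(1<<20, 256<<10, 100)) // 25 -> "25 %" in the decorator
	fmt.Println(percentage(1<<20, 256<<10, 80))  // 20 -> 20 filled cells of an 80-wide bar
}
```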
diff --git a/vendor/github.com/vbauerster/mpb/options.go b/vendor/github.com/vbauerster/mpb/options.go
deleted file mode 100644
index 44a6ee3f3..000000000
--- a/vendor/github.com/vbauerster/mpb/options.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package mpb
-
-import (
- "context"
- "io"
- "sync"
- "time"
-
- "github.com/vbauerster/mpb/cwriter"
-)
-
-// ProgressOption is a function option which changes the default
-// behavior of progress pool, if passed to mpb.New(...ProgressOption).
-type ProgressOption func(*pState)
-
-// WithWaitGroup provides means to have a single joint point. If
-// *sync.WaitGroup is provided, you can safely call just p.Wait()
-// without calling Wait() on provided *sync.WaitGroup. Makes sense
-// when there is more than one bar to render.
-func WithWaitGroup(wg *sync.WaitGroup) ProgressOption {
- return func(s *pState) {
- s.uwg = wg
- }
-}
-
-// WithWidth sets container width. Default is 80. Bars inherit this
-// width, as long as no BarWidth is applied.
-func WithWidth(w int) ProgressOption {
- return func(s *pState) {
- if w >= 0 {
- s.width = w
- }
- }
-}
-
-// WithRefreshRate overrides default 120ms refresh rate.
-func WithRefreshRate(d time.Duration) ProgressOption {
- return func(s *pState) {
- if d < 10*time.Millisecond {
- return
- }
- s.rr = d
- }
-}
-
-// WithManualRefresh disables internal auto refresh time.Ticker.
-// Refresh will occur upon receiving a value from the provided ch.
-func WithManualRefresh(ch <-chan time.Time) ProgressOption {
- return func(s *pState) {
- s.manualRefreshCh = ch
- }
-}
-
-// WithContext sets a context which will be used for cancellation purposes.
-func WithContext(ctx context.Context) ProgressOption {
- return func(s *pState) {
- if ctx == nil {
- return
- }
- s.ctx = ctx
- }
-}
-
-// WithShutdownNotifier sets a channel which will be closed after all bars
-// have been rendered.
-func WithShutdownNotifier(ch chan struct{}) ProgressOption {
- return func(s *pState) {
- s.shutdownNotifier = ch
- }
-}
-
-// WithOutput overrides default output os.Stdout.
-func WithOutput(w io.Writer) ProgressOption {
- return func(s *pState) {
- if w == nil {
- return
- }
- s.cw = cwriter.New(w)
- }
-}
-
-// WithDebugOutput sets debug output.
-func WithDebugOutput(w io.Writer) ProgressOption {
- return func(s *pState) {
- if w == nil {
- return
- }
- s.debugOut = w
- }
-}
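The container options above are all applied through mpb.New. A sketch combining a few of them; the width, refresh rate, and output destination are arbitrary illustrative choices (v3-style API):

```go
package main

import (
	"os"
	"sync"
	"time"

	"github.com/vbauerster/mpb"
)

func main() {
	var wg sync.WaitGroup
	p := mpb.New(
		mpb.WithWaitGroup(&wg),                    // p.Wait() will also wait on wg
		mpb.WithWidth(64),                         // container width, inherited by bars
		mpb.WithRefreshRate(200*time.Millisecond), // values below 10ms are ignored
		mpb.WithOutput(os.Stderr),                 // render to stderr instead of stdout
	)
	bar := p.AddBar(10)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			time.Sleep(50 * time.Millisecond)
			bar.IncrBy(1)
		}
	}()
	p.Wait()
}
```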
diff --git a/vendor/github.com/vbauerster/mpb/progress.go b/vendor/github.com/vbauerster/mpb/progress.go
deleted file mode 100644
index f9e25af79..000000000
--- a/vendor/github.com/vbauerster/mpb/progress.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package mpb
-
-import (
- "container/heap"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "sync"
- "time"
-
- "github.com/vbauerster/mpb/cwriter"
-)
-
-const (
- // default RefreshRate
- prr = 120 * time.Millisecond
- // default width
- pwidth = 80
-)
-
-// Progress represents the container that renders Progress bars
-type Progress struct {
- wg *sync.WaitGroup
- uwg *sync.WaitGroup
- operateState chan func(*pState)
- done chan struct{}
-}
-
-type pState struct {
- bHeap *priorityQueue
- shutdownPending []*Bar
- heapUpdated bool
- zeroWait bool
- idCounter int
- width int
- format string
- rr time.Duration
- cw *cwriter.Writer
- pMatrix map[int][]chan int
- aMatrix map[int][]chan int
-
-	// the following are provided/overridden by the user
- ctx context.Context
- uwg *sync.WaitGroup
- manualRefreshCh <-chan time.Time
- shutdownNotifier chan struct{}
- waitBars map[*Bar]*Bar
- debugOut io.Writer
-}
-
-// New creates a new Progress instance, which orchestrates the bar rendering
-// process. It accepts mpb.ProgressOption funcs for customization.
-func New(options ...ProgressOption) *Progress {
- pq := make(priorityQueue, 0)
- heap.Init(&pq)
- s := &pState{
- ctx: context.Background(),
- bHeap: &pq,
- width: pwidth,
- cw: cwriter.New(os.Stdout),
- rr: prr,
- waitBars: make(map[*Bar]*Bar),
- debugOut: ioutil.Discard,
- }
-
- for _, opt := range options {
- if opt != nil {
- opt(s)
- }
- }
-
- p := &Progress{
- uwg: s.uwg,
- wg: new(sync.WaitGroup),
- operateState: make(chan func(*pState)),
- done: make(chan struct{}),
- }
- go p.serve(s)
- return p
-}
-
-// AddBar creates a new progress bar and adds it to the container.
-func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
- return p.Add(total, newDefaultBarFiller(), options...)
-}
-
-// AddSpinner creates a new spinner bar and adds it to the container.
-func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {
- filler := &spinnerFiller{
- frames: defaultSpinnerStyle,
- alignment: alignment,
- }
- return p.Add(total, filler, options...)
-}
-
-// Add creates a bar which renders itself by the provided filler.
-func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {
- if filler == nil {
- filler = newDefaultBarFiller()
- }
- p.wg.Add(1)
- result := make(chan *Bar)
- select {
- case p.operateState <- func(s *pState) {
- b := newBar(s.ctx, p.wg, filler, s.idCounter, s.width, total, options...)
- if b.runningBar != nil {
- s.waitBars[b.runningBar] = b
- } else {
- heap.Push(s.bHeap, b)
- s.heapUpdated = true
- }
- s.idCounter++
- result <- b
- }:
- return <-result
- case <-p.done:
- p.wg.Done()
- return nil
- }
-}
-
-// Abort is only effective while the bar is running; it removes the bar
-// now, without waiting for its completion. If the bar is already
-// completed, there is nothing to abort. If you need to remove a bar
-// after completion, use the BarRemoveOnComplete BarOption.
-func (p *Progress) Abort(b *Bar, remove bool) {
- select {
- case p.operateState <- func(s *pState) {
- if b.index < 0 {
- return
- }
- if remove {
- s.heapUpdated = heap.Remove(s.bHeap, b.index) != nil
- }
- s.shutdownPending = append(s.shutdownPending, b)
- }:
- case <-p.done:
- }
-}
-
-// UpdateBarPriority provides a way to change bar's order position.
-// Zero is highest priority, i.e. bar will be on top.
-func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
- select {
- case p.operateState <- func(s *pState) { s.bHeap.update(b, priority) }:
- case <-p.done:
- }
-}
-
-// BarCount returns bars count
-func (p *Progress) BarCount() int {
- result := make(chan int, 1)
- select {
- case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
- return <-result
- case <-p.done:
- return 0
- }
-}
-
-// Wait first waits for user provided *sync.WaitGroup, if any, then
-// waits for all bars to complete and finally shuts down the master goroutine.
-// After this method has been called, there is no way to reuse *Progress
-// instance.
-func (p *Progress) Wait() {
- if p.uwg != nil {
- p.uwg.Wait()
- }
-
- p.wg.Wait()
-
- select {
- case p.operateState <- func(s *pState) { s.zeroWait = true }:
- <-p.done
- case <-p.done:
- }
-}
-
-func (s *pState) updateSyncMatrix() {
- s.pMatrix = make(map[int][]chan int)
- s.aMatrix = make(map[int][]chan int)
- for i := 0; i < s.bHeap.Len(); i++ {
- bar := (*s.bHeap)[i]
- table := bar.wSyncTable()
- pRow, aRow := table[0], table[1]
-
- for i, ch := range pRow {
- s.pMatrix[i] = append(s.pMatrix[i], ch)
- }
-
- for i, ch := range aRow {
- s.aMatrix[i] = append(s.aMatrix[i], ch)
- }
- }
-}
-
-func (s *pState) render(tw int) {
- if s.heapUpdated {
- s.updateSyncMatrix()
- s.heapUpdated = false
- }
- syncWidth(s.pMatrix)
- syncWidth(s.aMatrix)
-
- for i := 0; i < s.bHeap.Len(); i++ {
- bar := (*s.bHeap)[i]
- go bar.render(s.debugOut, tw)
- }
-
- if err := s.flush(s.bHeap.Len()); err != nil {
- fmt.Fprintf(s.debugOut, "%s %s %v\n", "[mpb]", time.Now(), err)
- }
-}
-
-func (s *pState) flush(lineCount int) error {
- for s.bHeap.Len() > 0 {
- bar := heap.Pop(s.bHeap).(*Bar)
- frameReader := <-bar.frameReaderCh
- defer func() {
- if frameReader.toShutdown {
- // shutdown at next flush, in other words decrement underlying WaitGroup
- // only after the bar with completed state has been flushed. this
- // ensures no bar ends up with less than 100% rendered.
- s.shutdownPending = append(s.shutdownPending, bar)
- if replacementBar, ok := s.waitBars[bar]; ok {
- heap.Push(s.bHeap, replacementBar)
- s.heapUpdated = true
- delete(s.waitBars, bar)
- }
- if frameReader.removeOnComplete {
- s.heapUpdated = true
- return
- }
- }
- heap.Push(s.bHeap, bar)
- }()
- s.cw.ReadFrom(frameReader)
- lineCount += frameReader.extendedLines
- }
-
- for i := len(s.shutdownPending) - 1; i >= 0; i-- {
- close(s.shutdownPending[i].shutdown)
- s.shutdownPending = s.shutdownPending[:i]
- }
-
- return s.cw.Flush(lineCount)
-}
-
-func syncWidth(matrix map[int][]chan int) {
- for _, column := range matrix {
- column := column
- go func() {
- var maxWidth int
- for _, ch := range column {
- w := <-ch
- if w > maxWidth {
- maxWidth = w
- }
- }
- for _, ch := range column {
- ch <- maxWidth
- }
- }()
- }
-}
diff --git a/vendor/github.com/vbauerster/mpb/progress_posix.go b/vendor/github.com/vbauerster/mpb/progress_posix.go
deleted file mode 100644
index 545245a42..000000000
--- a/vendor/github.com/vbauerster/mpb/progress_posix.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// +build !windows
-
-package mpb
-
-import (
- "os"
- "os/signal"
- "syscall"
- "time"
-)
-
-func (p *Progress) serve(s *pState) {
-
- var ticker *time.Ticker
- var refreshCh <-chan time.Time
- var winch chan os.Signal
- var resumeTimer *time.Timer
- var resumeEvent <-chan time.Time
- winchIdleDur := s.rr * 2
-
- if s.manualRefreshCh == nil {
- ticker = time.NewTicker(s.rr)
- refreshCh = ticker.C
- winch = make(chan os.Signal, 2)
- signal.Notify(winch, syscall.SIGWINCH)
- } else {
- refreshCh = s.manualRefreshCh
- }
-
- for {
- select {
- case op := <-p.operateState:
- op(s)
- case <-refreshCh:
- if s.zeroWait {
- if s.manualRefreshCh == nil {
- signal.Stop(winch)
- ticker.Stop()
- }
- if s.shutdownNotifier != nil {
- close(s.shutdownNotifier)
- }
- close(p.done)
- return
- }
- tw, err := s.cw.GetWidth()
- if err != nil {
- tw = s.width
- }
- s.render(tw)
- case <-winch:
- tw, err := s.cw.GetWidth()
- if err != nil {
- tw = s.width
- }
- s.render(tw - tw/8)
- if resumeTimer != nil && resumeTimer.Reset(winchIdleDur) {
- break
- }
- ticker.Stop()
- resumeTimer = time.NewTimer(winchIdleDur)
- resumeEvent = resumeTimer.C
- case <-resumeEvent:
- ticker = time.NewTicker(s.rr)
- refreshCh = ticker.C
- resumeEvent = nil
- resumeTimer = nil
- }
- }
-}
diff --git a/vendor/github.com/vbauerster/mpb/progress_windows.go b/vendor/github.com/vbauerster/mpb/progress_windows.go
deleted file mode 100644
index cab03d36c..000000000
--- a/vendor/github.com/vbauerster/mpb/progress_windows.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// +build windows
-
-package mpb
-
-import (
- "time"
-)
-
-func (p *Progress) serve(s *pState) {
-
- var ticker *time.Ticker
- var refreshCh <-chan time.Time
-
- if s.manualRefreshCh == nil {
- ticker = time.NewTicker(s.rr)
- refreshCh = ticker.C
- } else {
- refreshCh = s.manualRefreshCh
- }
-
- for {
- select {
- case op := <-p.operateState:
- op(s)
- case <-refreshCh:
- if s.zeroWait {
- if s.manualRefreshCh == nil {
- ticker.Stop()
- }
- if s.shutdownNotifier != nil {
- close(s.shutdownNotifier)
- }
- close(p.done)
- return
- }
- tw, err := s.cw.GetWidth()
- if err != nil {
- tw = s.width
- }
- s.render(tw)
- }
- }
-}
diff --git a/vendor/github.com/vbauerster/mpb/proxyreader.go b/vendor/github.com/vbauerster/mpb/proxyreader.go
deleted file mode 100644
index d2692ccf4..000000000
--- a/vendor/github.com/vbauerster/mpb/proxyreader.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package mpb
-
-import (
- "io"
- "time"
-)
-
-// proxyReader is an io.ReadCloser wrapper which reports read bytes to the bar.
-type proxyReader struct {
- io.ReadCloser
- bar *Bar
- iT time.Time
-}
-
-func (pr *proxyReader) Read(p []byte) (n int, err error) {
- n, err = pr.ReadCloser.Read(p)
- if n > 0 {
- pr.bar.IncrBy(n, time.Since(pr.iT))
- pr.iT = time.Now()
- }
- return
-}
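This proxy type is what powers read-progress tracking: every Read reports its byte count (and the time it took) to the bar. In the bumped v4 code added further below this is exposed as Bar.ProxyReader. A sketch of driving a bar from reads, using only v4 calls that appear in this diff (the in-memory payload and buffer size are illustrative):

```go
package main

import (
	"io"
	"strings"

	"github.com/vbauerster/mpb/v4"
)

func main() {
	payload := strings.Repeat("x", 1<<20) // 1 MiB of dummy data
	p := mpb.New(mpb.WithWidth(64))
	bar := p.AddBar(int64(len(payload)))

	// every Read on the proxy reports its byte count to the bar
	proxy := bar.ProxyReader(strings.NewReader(payload))
	defer proxy.Close()

	buf := make([]byte, 32*1024)
	for {
		if _, err := proxy.Read(buf); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
	}
	p.Wait()
}
```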
diff --git a/vendor/github.com/vbauerster/mpb/.gitignore b/vendor/github.com/vbauerster/mpb/v4/.gitignore
index 63bd91672..63bd91672 100644
--- a/vendor/github.com/vbauerster/mpb/.gitignore
+++ b/vendor/github.com/vbauerster/mpb/v4/.gitignore
diff --git a/vendor/github.com/vbauerster/mpb/v4/.travis.yml b/vendor/github.com/vbauerster/mpb/v4/.travis.yml
new file mode 100644
index 000000000..997ae32d6
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.12.x
+ - 1.13.x
+
+env:
+ - GO111MODULE=on
+
+script:
+ - go test -race ./...
+ - for i in _examples/*/; do go build $i/*.go || exit 1; done
diff --git a/vendor/github.com/vbauerster/mpb/README.md b/vendor/github.com/vbauerster/mpb/v4/README.md
index f96857c47..003fb5987 100644
--- a/vendor/github.com/vbauerster/mpb/README.md
+++ b/vendor/github.com/vbauerster/mpb/v4/README.md
@@ -3,43 +3,41 @@
[![GoDoc](https://godoc.org/github.com/vbauerster/mpb?status.svg)](https://godoc.org/github.com/vbauerster/mpb)
[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb)
[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb)
-[![codecov](https://codecov.io/gh/vbauerster/mpb/branch/master/graph/badge.svg)](https://codecov.io/gh/vbauerster/mpb)
**mpb** is a Go lib for rendering progress bars in terminal applications.
## Features
* __Multiple Bars__: Multiple progress bars are supported
-* __Dynamic Total__: [Set total](https://github.com/vbauerster/mpb/issues/9#issuecomment-344448984) while bar is running
+* __Dynamic Total__: Set total while bar is running
* __Dynamic Add/Remove__: Dynamically add or remove bars
* __Cancellation__: Cancel whole rendering process
* __Predefined Decorators__: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter
* __Decorator's width sync__: Synchronized decorator's width among multiple bars
-## Installation
+## Usage
-```sh
-go get github.com/vbauerster/mpb
-```
+#### [Rendering single bar](_examples/singleBar/main.go)
+```go
+package main
-_Note:_ it is preferable to go get from github.com, rather than gopkg.in. See issue [#11](https://github.com/vbauerster/mpb/issues/11).
+import (
+ "math/rand"
+ "time"
-## Usage
+ "github.com/vbauerster/mpb/v4"
+ "github.com/vbauerster/mpb/v4/decor"
+)
-#### [Rendering single bar](examples/singleBar/main.go)
-```go
- p := mpb.New(
- // override default (80) width
- mpb.WithWidth(64),
- // override default 120ms refresh rate
- mpb.WithRefreshRate(180*time.Millisecond),
- )
+func main() {
+ // initialize progress container, with custom width
+ p := mpb.New(mpb.WithWidth(64))
total := 100
name := "Single Bar:"
- // adding a single bar
+ // adding a single bar, which will inherit container's width
bar := p.AddBar(int64(total),
- // override default "[=>-]" style
+ // override DefaultBarStyle, which is "[=>-]<+"
mpb.BarStyle("╢▌▌░╟"),
mpb.PrependDecorators(
// display our name with one space on the right
@@ -57,16 +55,18 @@ _Note:_ it is preferable to go get from github.com, rather than gopkg.in. See is
for i := 0; i < total; i++ {
start := time.Now()
time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10)
- // ewma based decorators require work duration measurement
- bar.IncrBy(1, time.Since(start))
+ // since ewma decorator is used, we need to pass time.Since(start)
+ bar.Increment(time.Since(start))
}
// wait for our bar to complete and flush
p.Wait()
+}
```
-#### [Rendering multiple bars](examples/simple/main.go)
+#### [Rendering multiple bars](_examples/multiBars//main.go)
```go
var wg sync.WaitGroup
+ // pass &wg (optional), so p will wait for it eventually
p := mpb.New(mpb.WithWaitGroup(&wg))
total, numBars := 100, 3
wg.Add(numBars)
@@ -91,27 +91,28 @@ _Note:_ it is preferable to go get from github.com, rather than gopkg.in. See is
// simulating some work
go func() {
defer wg.Done()
+ rng := rand.New(rand.NewSource(time.Now().UnixNano()))
max := 100 * time.Millisecond
for i := 0; i < total; i++ {
start := time.Now()
- time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10)
- // ewma based decorators require work duration measurement
- bar.IncrBy(1, time.Since(start))
+ time.Sleep(time.Duration(rng.Intn(10)+1) * max / 10)
+ // since ewma decorator is used, we need to pass time.Since(start)
+ bar.Increment(time.Since(start))
}
}()
}
- // wait for all bars to complete and flush
+ // Waiting for passed &wg and for all bars to complete and flush
p.Wait()
```
-#### [Dynamic total](examples/dynTotal/main.go)
+#### [Dynamic total](_examples/dynTotal/main.go)
-![dynamic total](examples/gifs/godEMrCZmJkHYH1X9dN4Nm0U7.svg)
+![dynamic total](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg)
-#### [Complex example](examples/complex/main.go)
+#### [Complex example](_examples/complex/main.go)
-![complex](examples/gifs/wHzf1M7sd7B3zVa2scBMnjqRf.svg)
+![complex](_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg)
-#### [Bytes counters](examples/io/single/main.go)
+#### [Bytes counters](_examples/io/main.go)
-![byte counters](examples/gifs/hIpTa3A5rQz65ssiVuRJu87X6.svg)
+![byte counters](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg)
diff --git a/vendor/github.com/vbauerster/mpb/v4/UNLICENSE b/vendor/github.com/vbauerster/mpb/v4/UNLICENSE
new file mode 100644
index 000000000..68a49daad
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/UNLICENSE
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
diff --git a/vendor/github.com/vbauerster/mpb/v4/bar.go b/vendor/github.com/vbauerster/mpb/v4/bar.go
new file mode 100644
index 000000000..c362da739
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/bar.go
@@ -0,0 +1,477 @@
+package mpb
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/v4/decor"
+)
+
+// Filler interface.
+// Bar renders itself by calling the Filler's Fill method. You can have
+// literally any kind of bar by implementing this interface and passing it
+// to the *Progress.Add method.
+type Filler interface {
+ Fill(w io.Writer, width int, stat *decor.Statistics)
+}
+
+// FillerFunc is a function type adapter to convert a function into a Filler.
+type FillerFunc func(w io.Writer, width int, stat *decor.Statistics)
+
+func (f FillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) {
+ f(w, width, stat)
+}
+
+// Wrapper interface.
+// If you're implementing a custom Filler by wrapping a built-in one,
+// it is necessary to implement this interface to retain the functionality
+// of the built-in Filler.
+type Wrapper interface {
+ Base() Filler
+}
+
+// Bar represents a progress Bar.
+type Bar struct {
+ priority int // used by heap
+ index int // used by heap
+
+ extendedLines int
+ toShutdown bool
+ toDrop bool
+ noPop bool
+ operateState chan func(*bState)
+ frameCh chan io.Reader
+ syncTableCh chan [][]chan int
+ completed chan bool
+
+ // cancel is called either by user or on complete event
+ cancel func()
+ // done is closed after cacheState is assigned
+ done chan struct{}
+ // cacheState is populated, right after close(shutdown)
+ cacheState *bState
+
+ container *Progress
+ dlogger *log.Logger
+ recoveredPanic interface{}
+}
+
+type extFunc func(in io.Reader, tw int, st *decor.Statistics) (out io.Reader, lines int)
+
+type bState struct {
+ baseF Filler
+ filler Filler
+ id int
+ width int
+ total int64
+ current int64
+ trimSpace bool
+ toComplete bool
+ completeFlushed bool
+ noPop bool
+ aDecorators []decor.Decorator
+ pDecorators []decor.Decorator
+ amountReceivers []decor.AmountReceiver
+ shutdownListeners []decor.ShutdownListener
+ averageAdjusters []decor.AverageAdjuster
+ bufP, bufB, bufA *bytes.Buffer
+ extender extFunc
+
+ // priority overrides *Bar's priority, if set
+ priority int
+ // dropOnComplete propagates to *Bar
+ dropOnComplete bool
+ // runningBar is a key for *pState.parkedBars
+ runningBar *Bar
+
+ debugOut io.Writer
+}
+
+func newBar(container *Progress, bs *bState) *Bar {
+ logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
+ ctx, cancel := context.WithCancel(container.ctx)
+
+ bar := &Bar{
+ container: container,
+ priority: bs.priority,
+ toDrop: bs.dropOnComplete,
+ noPop: bs.noPop,
+ operateState: make(chan func(*bState)),
+ frameCh: make(chan io.Reader, 1),
+ syncTableCh: make(chan [][]chan int),
+ completed: make(chan bool, 1),
+ done: make(chan struct{}),
+ cancel: cancel,
+ dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
+ }
+
+ go bar.serve(ctx, bs)
+ return bar
+}
+
+// RemoveAllPrependers removes all prepend functions.
+func (b *Bar) RemoveAllPrependers() {
+ select {
+ case b.operateState <- func(s *bState) { s.pDecorators = nil }:
+ case <-b.done:
+ }
+}
+
+// RemoveAllAppenders removes all append functions.
+func (b *Bar) RemoveAllAppenders() {
+ select {
+ case b.operateState <- func(s *bState) { s.aDecorators = nil }:
+ case <-b.done:
+ }
+}
+
+// ProxyReader wraps r with metrics required for progress tracking.
+func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
+ if r == nil {
+ return nil
+ }
+ rc, ok := r.(io.ReadCloser)
+ if !ok {
+ rc = ioutil.NopCloser(r)
+ }
+ prox := &proxyReader{rc, b, time.Now()}
+ if wt, ok := r.(io.WriterTo); ok {
+ return &proxyWriterTo{prox, wt}
+ }
+ return prox
+}
+
+// ID returns the id of the bar.
+func (b *Bar) ID() int {
+ result := make(chan int)
+ select {
+ case b.operateState <- func(s *bState) { result <- s.id }:
+ return <-result
+ case <-b.done:
+ return b.cacheState.id
+ }
+}
+
+// Current returns the bar's current number, in other words the sum of all increments.
+func (b *Bar) Current() int64 {
+ result := make(chan int64)
+ select {
+ case b.operateState <- func(s *bState) { result <- s.current }:
+ return <-result
+ case <-b.done:
+ return b.cacheState.current
+ }
+}
+
+// SetRefill sets refill, if supported by underlying Filler.
+// Useful for resume-able tasks.
+func (b *Bar) SetRefill(amount int64) {
+ type refiller interface {
+ SetRefill(int64)
+ }
+ b.operateState <- func(s *bState) {
+ if f, ok := s.baseF.(refiller); ok {
+ f.SetRefill(amount)
+ }
+ }
+}
+
+// AdjustAverageDecorators updates start time of all average decorators.
+// Useful for resume-able tasks.
+func (b *Bar) AdjustAverageDecorators(startTime time.Time) {
+ b.operateState <- func(s *bState) {
+ for _, adjuster := range s.averageAdjusters {
+ adjuster.AverageAdjust(startTime)
+ }
+ }
+}
+
+// TraverseDecorators traverses all available decorators and calls cb func on each.
+func (b *Bar) TraverseDecorators(cb decor.CBFunc) {
+ b.operateState <- func(s *bState) {
+ for _, decorators := range [...][]decor.Decorator{
+ s.pDecorators,
+ s.aDecorators,
+ } {
+ for _, d := range decorators {
+ cb(extractBaseDecorator(d))
+ }
+ }
+ }
+}
+
+// SetTotal sets total dynamically.
+// Set complete to true to trigger the bar's complete event now.
+func (b *Bar) SetTotal(total int64, complete bool) {
+ select {
+ case b.operateState <- func(s *bState) {
+ if total <= 0 {
+ s.total = s.current
+ } else {
+ s.total = total
+ }
+ if complete && !s.toComplete {
+ s.current = s.total
+ s.toComplete = true
+ go b.refreshTillShutdown()
+ }
+ }:
+ case <-b.done:
+ }
+}
+
+// SetCurrent sets progress' current to arbitrary amount.
+func (b *Bar) SetCurrent(current int64, wdd ...time.Duration) {
+ select {
+ case b.operateState <- func(s *bState) {
+ for _, ar := range s.amountReceivers {
+ ar.NextAmount(current-s.current, wdd...)
+ }
+ s.current = current
+ if s.total > 0 && s.current >= s.total {
+ s.current = s.total
+ s.toComplete = true
+ go b.refreshTillShutdown()
+ }
+ }:
+ case <-b.done:
+ }
+}
+
+// Increment is a shorthand for b.IncrInt64(1, wdd...).
+func (b *Bar) Increment(wdd ...time.Duration) {
+ b.IncrInt64(1, wdd...)
+}
+
+// IncrBy is a shorthand for b.IncrInt64(int64(n), wdd...).
+func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
+ b.IncrInt64(int64(n), wdd...)
+}
+
+// IncrInt64 increments the progress bar by n. wdd is an optional
+// work duration, i.e. time.Since(start), which is expected to be passed
+// if any ewma based decorator is used.
+func (b *Bar) IncrInt64(n int64, wdd ...time.Duration) {
+ select {
+ case b.operateState <- func(s *bState) {
+ for _, ar := range s.amountReceivers {
+ ar.NextAmount(n, wdd...)
+ }
+ s.current += n
+ if s.total > 0 && s.current >= s.total {
+ s.current = s.total
+ s.toComplete = true
+ go b.refreshTillShutdown()
+ }
+ }:
+ case <-b.done:
+ }
+}
+
+// SetPriority changes bar's order among multiple bars. Zero is highest
+// priority, i.e. the bar will be on top. If you don't need to set priority
+// dynamically, it is better to use the BarPriority option.
+func (b *Bar) SetPriority(priority int) {
+ select {
+ case <-b.done:
+ default:
+ b.container.setBarPriority(b, priority)
+ }
+}
+
+// Abort interrupts the bar's running goroutine. Call this if you'd like to
+// stop/remove the bar before the completion event. It has no effect after
+// the completion event. If drop is true, the bar will be removed as well.
+func (b *Bar) Abort(drop bool) {
+ select {
+ case <-b.done:
+ default:
+ if drop {
+ b.container.dropBar(b)
+ }
+ b.cancel()
+ }
+}
+
+// Completed reports whether the bar is in completed state.
+func (b *Bar) Completed() bool {
+ select {
+ case b.operateState <- func(s *bState) { b.completed <- s.toComplete }:
+ return <-b.completed
+ case <-b.done:
+ return true
+ }
+}
+
+func (b *Bar) serve(ctx context.Context, s *bState) {
+ defer b.container.bwg.Done()
+ for {
+ select {
+ case op := <-b.operateState:
+ op(s)
+ case <-ctx.Done():
+ b.cacheState = s
+ close(b.done)
+ // Notifying decorators about shutdown event
+ for _, sl := range s.shutdownListeners {
+ sl.Shutdown()
+ }
+ return
+ }
+ }
+}
+
+func (b *Bar) render(tw int) {
+ if b.recoveredPanic != nil {
+ b.toShutdown = false
+ b.frameCh <- b.panicToFrame(tw)
+ return
+ }
+ select {
+ case b.operateState <- func(s *bState) {
+ defer func() {
+ // recovering if user defined decorator panics for example
+ if p := recover(); p != nil {
+ b.dlogger.Println(p)
+ b.recoveredPanic = p
+ b.toShutdown = !s.completeFlushed
+ b.frameCh <- b.panicToFrame(tw)
+ }
+ }()
+
+ st := newStatistics(s)
+ frame := s.draw(tw, st)
+ frame, b.extendedLines = s.extender(frame, tw, st)
+
+ b.toShutdown = s.toComplete && !s.completeFlushed
+ s.completeFlushed = s.toComplete
+ b.frameCh <- frame
+ }:
+ case <-b.done:
+ s := b.cacheState
+ st := newStatistics(s)
+ frame := s.draw(tw, st)
+ frame, b.extendedLines = s.extender(frame, tw, st)
+ b.frameCh <- frame
+ }
+}
+
+func (b *Bar) panicToFrame(termWidth int) io.Reader {
+ return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%dv\n", termWidth), b.recoveredPanic))
+}
+
+func (b *Bar) subscribeDecorators() {
+ var amountReceivers []decor.AmountReceiver
+ var shutdownListeners []decor.ShutdownListener
+ var averageAdjusters []decor.AverageAdjuster
+ b.TraverseDecorators(func(d decor.Decorator) {
+ if d, ok := d.(decor.AmountReceiver); ok {
+ amountReceivers = append(amountReceivers, d)
+ }
+ if d, ok := d.(decor.ShutdownListener); ok {
+ shutdownListeners = append(shutdownListeners, d)
+ }
+ if d, ok := d.(decor.AverageAdjuster); ok {
+ averageAdjusters = append(averageAdjusters, d)
+ }
+ })
+ b.operateState <- func(s *bState) {
+ s.amountReceivers = amountReceivers
+ s.shutdownListeners = shutdownListeners
+ s.averageAdjusters = averageAdjusters
+ }
+}
+
+func (b *Bar) refreshTillShutdown() {
+ for {
+ select {
+ case b.container.refreshCh <- time.Now():
+ case <-b.done:
+ return
+ }
+ }
+}
+
+func (b *Bar) wSyncTable() [][]chan int {
+ select {
+ case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
+ return <-b.syncTableCh
+ case <-b.done:
+ return b.cacheState.wSyncTable()
+ }
+}
+
+func (s *bState) draw(termWidth int, stat *decor.Statistics) io.Reader {
+ for _, d := range s.pDecorators {
+ s.bufP.WriteString(d.Decor(stat))
+ }
+
+ for _, d := range s.aDecorators {
+ s.bufA.WriteString(d.Decor(stat))
+ }
+
+ s.bufA.WriteByte('\n')
+
+ prependCount := utf8.RuneCount(s.bufP.Bytes())
+ appendCount := utf8.RuneCount(s.bufA.Bytes()) - 1
+
+ if fitWidth := s.width; termWidth > 1 {
+ if !s.trimSpace {
+ // reserve space for edge spaces
+ termWidth -= 2
+ s.bufB.WriteByte(' ')
+ defer s.bufB.WriteByte(' ')
+ }
+ if prependCount+s.width+appendCount > termWidth {
+ fitWidth = termWidth - prependCount - appendCount
+ }
+ s.filler.Fill(s.bufB, fitWidth, stat)
+ }
+
+ return io.MultiReader(s.bufP, s.bufB, s.bufA)
+}
+
+func (s *bState) wSyncTable() [][]chan int {
+ columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators))
+ var pCount int
+ for _, d := range s.pDecorators {
+ if ch, ok := d.Sync(); ok {
+ columns = append(columns, ch)
+ pCount++
+ }
+ }
+ var aCount int
+ for _, d := range s.aDecorators {
+ if ch, ok := d.Sync(); ok {
+ columns = append(columns, ch)
+ aCount++
+ }
+ }
+ table := make([][]chan int, 2)
+ table[0] = columns[0:pCount]
+ table[1] = columns[pCount : pCount+aCount : pCount+aCount]
+ return table
+}
+
+func newStatistics(s *bState) *decor.Statistics {
+ return &decor.Statistics{
+ ID: s.id,
+ Completed: s.completeFlushed,
+ Total: s.total,
+ Current: s.current,
+ }
+}
+
+func extractBaseDecorator(d decor.Decorator) decor.Decorator {
+ if d, ok := d.(decor.Wrapper); ok {
+ return extractBaseDecorator(d.Base())
+ }
+ return d
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/bar_filler.go b/vendor/github.com/vbauerster/mpb/v4/bar_filler.go
new file mode 100644
index 000000000..0d751a68d
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/bar_filler.go
@@ -0,0 +1,137 @@
+package mpb
+
+import (
+ "io"
+ "unicode/utf8"
+
+ "github.com/vbauerster/mpb/v4/decor"
+ "github.com/vbauerster/mpb/v4/internal"
+)
+
+const (
+ rLeft = iota
+ rFill
+ rTip
+ rEmpty
+ rRight
+ rRevTip
+ rRefill
+)
+
+// DefaultBarStyle is applied when a bar is constructed with the *Progress.AddBar method.
+//
+// '1st rune' stands for left boundary rune
+//
+// '2nd rune' stands for fill rune
+//
+// '3rd rune' stands for tip rune
+//
+// '4th rune' stands for empty rune
+//
+// '5th rune' stands for right boundary rune
+//
+// '6th rune' stands for reverse tip rune
+//
+// '7th rune' stands for refill rune
+//
+const DefaultBarStyle string = "[=>-]<+"
+
+type barFiller struct {
+ format [][]byte
+ tip []byte
+ refill int64
+ reverse bool
+ flush func(w io.Writer, bb [][]byte)
+}
+
+// NewBarFiller constructs an mpb.Filler, to be used with the *Progress.Add method.
+func NewBarFiller(style string, reverse bool) Filler {
+ if style == "" {
+ style = DefaultBarStyle
+ }
+ bf := &barFiller{
+ format: make([][]byte, utf8.RuneCountInString(style)),
+ }
+ bf.SetStyle(style)
+ bf.SetReverse(reverse)
+ return bf
+}
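+
+// Illustrative sketch (assumed names p and total; not upstream docs): a
+// custom filler is passed to *Progress.Add, for example a reversed bar:
+//
+//	bar := p.Add(total, NewBarFiller(DefaultBarStyle, true))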
+
+func (s *barFiller) SetStyle(style string) {
+ if !utf8.ValidString(style) {
+ return
+ }
+ src := make([][]byte, 0, utf8.RuneCountInString(style))
+ for _, r := range style {
+ src = append(src, []byte(string(r)))
+ }
+ copy(s.format, src)
+ if s.reverse {
+ s.tip = s.format[rRevTip]
+ } else {
+ s.tip = s.format[rTip]
+ }
+}
+
+func (s *barFiller) SetReverse(reverse bool) {
+ if reverse {
+ s.tip = s.format[rRevTip]
+ s.flush = func(w io.Writer, bb [][]byte) {
+ for i := len(bb) - 1; i >= 0; i-- {
+ w.Write(bb[i])
+ }
+ }
+ } else {
+ s.tip = s.format[rTip]
+ s.flush = func(w io.Writer, bb [][]byte) {
+ for i := 0; i < len(bb); i++ {
+ w.Write(bb[i])
+ }
+ }
+ }
+ s.reverse = reverse
+}
+
+func (s *barFiller) SetRefill(amount int64) {
+ s.refill = amount
+}
+
+func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
+ // don't count rLeft and rRight as progress
+ width -= 2
+ if width < 2 {
+ return
+ }
+ w.Write(s.format[rLeft])
+ defer w.Write(s.format[rRight])
+
+ bb := make([][]byte, width)
+
+ cwidth := int(internal.PercentageRound(stat.Total, stat.Current, width))
+
+ for i := 0; i < cwidth; i++ {
+ bb[i] = s.format[rFill]
+ }
+
+ if s.refill > 0 {
+ var rwidth int
+ if s.refill > stat.Current {
+ rwidth = cwidth
+ } else {
+ rwidth = int(internal.PercentageRound(stat.Total, int64(s.refill), width))
+ }
+ for i := 0; i < rwidth; i++ {
+ bb[i] = s.format[rRefill]
+ }
+ }
+
+ if cwidth > 0 && cwidth < width {
+ bb[cwidth-1] = s.tip
+ }
+
+ for i := cwidth; i < width; i++ {
+ bb[i] = s.format[rEmpty]
+ }
+
+ s.flush(w, bb)
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/bar_option.go b/vendor/github.com/vbauerster/mpb/v4/bar_option.go
new file mode 100644
index 000000000..bb79ac6a4
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/bar_option.go
@@ -0,0 +1,209 @@
+package mpb
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/vbauerster/mpb/v4/decor"
+)
+
+// BarOption is a function option which changes the default behavior of a bar.
+type BarOption func(*bState)
+
+type mergeWrapper interface {
+ MergeUnwrap() []decor.Decorator
+}
+
+func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) {
+ for _, decorator := range decorators {
+ if mw, ok := decorator.(mergeWrapper); ok {
+ *dest = append(*dest, mw.MergeUnwrap()...)
+ }
+ *dest = append(*dest, decorator)
+ }
+}
+
+// AppendDecorators lets you inject decorators to the bar's right side.
+func AppendDecorators(decorators ...decor.Decorator) BarOption {
+ return func(s *bState) {
+ s.addDecorators(&s.aDecorators, decorators...)
+ }
+}
+
+// PrependDecorators lets you inject decorators to the bar's left side.
+func PrependDecorators(decorators ...decor.Decorator) BarOption {
+ return func(s *bState) {
+ s.addDecorators(&s.pDecorators, decorators...)
+ }
+}
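+
+// Illustrative sketch (assumed names p and total): decorators are usually
+// supplied at construction time, e.g.
+//
+//	bar := p.AddBar(total,
+//		PrependDecorators(decor.Name("copying")),
+//		AppendDecorators(decor.Percentage()),
+//	)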
+
+// BarID sets bar id.
+func BarID(id int) BarOption {
+ return func(s *bState) {
+ s.id = id
+ }
+}
+
+// BarWidth sets bar width independent of the container.
+func BarWidth(width int) BarOption {
+ return func(s *bState) {
+ s.width = width
+ }
+}
+
+// BarReplaceOnComplete is deprecated. Use BarParkTo instead.
+func BarReplaceOnComplete(runningBar *Bar) BarOption {
+ return BarParkTo(runningBar)
+}
+
+// BarParkTo parks the constructed bar into the runningBar. In other words,
+// the constructed bar will replace runningBar after runningBar has completed.
+func BarParkTo(runningBar *Bar) BarOption {
+ if runningBar == nil {
+ return nil
+ }
+ return func(s *bState) {
+ s.runningBar = runningBar
+ }
+}
+
+// BarRemoveOnComplete removes bar filler and decorators if any, on
+// complete event.
+func BarRemoveOnComplete() BarOption {
+ return func(s *bState) {
+ s.dropOnComplete = true
+ }
+}
+
+// BarClearOnComplete clears bar filler only, on complete event.
+func BarClearOnComplete() BarOption {
+ return BarOnComplete("")
+}
+
+// BarOnComplete replaces bar filler with message, on complete event.
+func BarOnComplete(message string) BarOption {
+ return func(s *bState) {
+ s.filler = makeBarOnCompleteFiller(s.baseF, message)
+ }
+}
+
+func makeBarOnCompleteFiller(filler Filler, message string) Filler {
+ return FillerFunc(func(w io.Writer, width int, st *decor.Statistics) {
+ if st.Completed {
+ io.WriteString(w, message)
+ } else {
+ filler.Fill(w, width, st)
+ }
+ })
+}
+
+// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
+// will be on top. If `BarReplaceOnComplete` option is supplied, this
+// option is ignored.
+func BarPriority(priority int) BarOption {
+ return func(s *bState) {
+ s.priority = priority
+ }
+}
+
+// BarExtender is an option to extend bar to the next new line, with
+// arbitrary output.
+func BarExtender(extender Filler) BarOption {
+ if extender == nil {
+ return nil
+ }
+ return func(s *bState) {
+ s.extender = makeExtFunc(extender)
+ }
+}
+
+func makeExtFunc(extender Filler) extFunc {
+ buf := new(bytes.Buffer)
+ nl := []byte("\n")
+ return func(r io.Reader, tw int, st *decor.Statistics) (io.Reader, int) {
+ extender.Fill(buf, tw, st)
+ return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), nl)
+ }
+}
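+
+// Illustrative sketch (assumed names p and total): an extender renders
+// extra lines below the bar, for example a status line via FillerFunc:
+//
+//	status := FillerFunc(func(w io.Writer, width int, st *decor.Statistics) {
+//		fmt.Fprintf(w, "current: %d of %d\n", st.Current, st.Total)
+//	})
+//	bar := p.AddBar(total, BarExtender(status))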
+
+// TrimSpace trims bar's edge spaces.
+func TrimSpace() BarOption {
+ return func(s *bState) {
+ s.trimSpace = true
+ }
+}
+
+// BarStyle overrides mpb.DefaultBarStyle, for example BarStyle("╢▌▌░╟").
+// If you need to override `reverse tip` and `refill rune` set 6th and
+// 7th rune respectively, for example BarStyle("[=>-]<+").
+func BarStyle(style string) BarOption {
+ if style == "" {
+ return nil
+ }
+ type styleSetter interface {
+ SetStyle(string)
+ }
+ return func(s *bState) {
+ if t, ok := s.baseF.(styleSetter); ok {
+ t.SetStyle(style)
+ }
+ }
+}
+
+// BarNoPop disables bar pop out of container. Effective when
+// PopCompletedMode of container is enabled.
+func BarNoPop() BarOption {
+ return func(s *bState) {
+ s.noPop = true
+ }
+}
+
+// BarReverse sets reverse mode, the bar will progress from right to left.
+func BarReverse() BarOption {
+ type revSetter interface {
+ SetReverse(bool)
+ }
+ return func(s *bState) {
+ if t, ok := s.baseF.(revSetter); ok {
+ t.SetReverse(true)
+ }
+ }
+}
+
+// SpinnerStyle sets custom spinner style.
+// Effective when Filler type is spinner.
+func SpinnerStyle(frames []string) BarOption {
+ if len(frames) == 0 {
+ return nil
+ }
+ chk := func(filler Filler) (interface{}, bool) {
+ t, ok := filler.(*spinnerFiller)
+ return t, ok
+ }
+ cb := func(t interface{}) {
+ t.(*spinnerFiller).frames = frames
+ }
+ return MakeFillerTypeSpecificBarOption(chk, cb)
+}
+
+// MakeFillerTypeSpecificBarOption makes a BarOption specific to a Filler's
+// actual type. If you implement your own Filler, you will most probably
+// need this. See BarStyle or SpinnerStyle for example.
+func MakeFillerTypeSpecificBarOption(
+ typeChecker func(Filler) (interface{}, bool),
+ cb func(interface{}),
+) BarOption {
+ return func(s *bState) {
+ if t, ok := typeChecker(s.baseF); ok {
+ cb(t)
+ }
+ }
+}
+
+// BarOptOnCond returns option when condition evaluates to true.
+func BarOptOnCond(option BarOption, condition func() bool) BarOption {
+ if condition() {
+ return option
+ }
+ return nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer.go
index 638237c18..9ec1ec66b 100644
--- a/vendor/github.com/vbauerster/mpb/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer.go
@@ -7,59 +7,50 @@ import (
"io"
"os"
- isatty "github.com/mattn/go-isatty"
"golang.org/x/crypto/ssh/terminal"
)
-// ESC is the ASCII code for escape character
-const ESC = 27
-
+// NotATTY is a 'not a TeleTYpewriter' error.
var NotATTY = errors.New("not a terminal")
-var (
- cursorUp = fmt.Sprintf("%c[%dA", ESC, 1)
- clearLine = fmt.Sprintf("%c[2K\r", ESC)
- clearCursorAndLine = cursorUp + clearLine
-)
+var cuuAndEd = fmt.Sprintf("%c[%%dA%[1]c[J", 27)
-// Writer is a buffered the writer that updates the terminal. The
+// Writer is a buffered writer that updates the terminal. The
// contents of writer will be flushed when Flush is called.
type Writer struct {
out io.Writer
buf bytes.Buffer
- isTerminal bool
- fd int
lineCount int
+ fd uintptr
+ isTerminal bool
}
-// New returns a new Writer with defaults
+// New returns a new Writer with defaults.
func New(out io.Writer) *Writer {
w := &Writer{out: out}
if f, ok := out.(*os.File); ok {
- fd := f.Fd()
- w.isTerminal = isatty.IsTerminal(fd)
- w.fd = int(fd)
+ w.fd = f.Fd()
+ w.isTerminal = terminal.IsTerminal(int(w.fd))
}
return w
}
-// Flush flushes the underlying buffer
-func (w *Writer) Flush(lineCount int) error {
- err := w.clearLines()
- w.lineCount = lineCount
- // WriteTo takes care of w.buf.Reset
- if _, e := w.buf.WriteTo(w.out); err == nil {
- err = e
+// Flush flushes the underlying buffer.
+func (w *Writer) Flush(lineCount int) (err error) {
+ if w.lineCount > 0 {
+ w.clearLines()
}
- return err
+ w.lineCount = lineCount
+ _, err = w.buf.WriteTo(w.out)
+ return
}
-// Write appends the contents of p to the underlying buffer
+// Write appends the contents of p to the underlying buffer.
func (w *Writer) Write(p []byte) (n int, err error) {
return w.buf.Write(p)
}
-// WriteString writes string to the underlying buffer
+// WriteString writes string to the underlying buffer.
func (w *Writer) WriteString(s string) (n int, err error) {
return w.buf.WriteString(s)
}
@@ -73,7 +64,7 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
// GetWidth returns width of underlying terminal.
func (w *Writer) GetWidth() (int, error) {
if w.isTerminal {
- tw, _, err := terminal.GetSize(w.fd)
+ tw, _, err := terminal.GetSize(int(w.fd))
return tw, err
}
return -1, NotATTY
diff --git a/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_posix.go
new file mode 100644
index 000000000..3fb8b7d75
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_posix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package cwriter
+
+import "fmt"
+
+func (w *Writer) clearLines() {
+ fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_windows.go
new file mode 100644
index 000000000..712528900
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/cwriter/writer_windows.go
@@ -0,0 +1,60 @@
+// +build windows
+
+package cwriter
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+)
+
+type coord struct {
+ x int16
+ y int16
+}
+
+type smallRect struct {
+ left int16
+ top int16
+ right int16
+ bottom int16
+}
+
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes uint16
+ window smallRect
+ maximumWindowSize coord
+}
+
+func (w *Writer) clearLines() {
+ if !w.isTerminal {
+ fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
+ }
+ var info consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(w.fd, uintptr(unsafe.Pointer(&info)))
+
+ info.cursorPosition.y -= int16(w.lineCount)
+ if info.cursorPosition.y < 0 {
+ info.cursorPosition.y = 0
+ }
+ procSetConsoleCursorPosition.Call(w.fd, uintptr(uint32(uint16(info.cursorPosition.y))<<16|uint32(uint16(info.cursorPosition.x))))
+
+ // clear the lines
+ cursor := coord{
+ x: info.window.left,
+ y: info.cursorPosition.y,
+ }
+ count := uint32(info.size.x) * uint32(w.lineCount)
+ procFillConsoleOutputCharacter.Call(w.fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(new(uint32))))
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/counters.go b/vendor/github.com/vbauerster/mpb/v4/decor/counters.go
new file mode 100644
index 000000000..32bcdf76a
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/counters.go
@@ -0,0 +1,84 @@
+package decor
+
+import (
+ "fmt"
+)
+
+const (
+ _ = iota
+ UnitKiB
+ UnitKB
+)
+
+// CountersNoUnit is a wrapper around Counters with no unit param.
+func CountersNoUnit(pairFmt string, wcc ...WC) Decorator {
+ return Counters(0, pairFmt, wcc...)
+}
+
+// CountersKibiByte is a wrapper around Counters with predefined unit
+// UnitKiB (bytes/1024).
+func CountersKibiByte(pairFmt string, wcc ...WC) Decorator {
+ return Counters(UnitKiB, pairFmt, wcc...)
+}
+
+// CountersKiloByte is a wrapper around Counters with predefined unit
+// UnitKB (bytes/1000).
+func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
+ return Counters(UnitKB, pairFmt, wcc...)
+}
+
+// Counters decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `pairFmt` printf compatible verbs for current and total, like "%f" or "%d"
+//
+// `wcc` optional WC config
+//
+// pairFmt example if unit=UnitKB:
+//
+// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB"
+// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB"
+// pairFmt="%d / %d" output: "1MB / 12MB"
+// pairFmt="% d / % d" output: "1 MB / 12 MB"
+//
+func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ d := &countersDecorator{
+ WC: wc.Init(),
+ producer: chooseSizeProducer(unit, pairFmt),
+ }
+ return d
+}
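+
+// Illustrative sketch (assumed names p and total): a byte counter appended
+// to a bar, e.g.
+//
+//	bar := p.AddBar(total, mpb.AppendDecorators(
+//		decor.CountersKibiByte("% .1f / % .1f"),
+//	))
+//
+// renders along the lines of "1.0 MiB / 12.0 MiB".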
+
+type countersDecorator struct {
+ WC
+ producer func(*Statistics) string
+}
+
+func (d *countersDecorator) Decor(st *Statistics) string {
+ return d.FormatMsg(d.producer(st))
+}
+
+func chooseSizeProducer(unit int, format string) func(*Statistics) string {
+ if format == "" {
+ format = "%d / %d"
+ }
+ switch unit {
+ case UnitKiB:
+ return func(st *Statistics) string {
+ return fmt.Sprintf(format, SizeB1024(st.Current), SizeB1024(st.Total))
+ }
+ case UnitKB:
+ return func(st *Statistics) string {
+ return fmt.Sprintf(format, SizeB1000(st.Current), SizeB1000(st.Total))
+ }
+ default:
+ return func(st *Statistics) string {
+ return fmt.Sprintf(format, st.Current, st.Total)
+ }
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go
new file mode 100644
index 000000000..5c0d16880
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/decorator.go
@@ -0,0 +1,173 @@
+package decor
+
+import (
+ "fmt"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+	// DidentRight bit specifies indentation direction.
+ // |foo |b | With DidentRight
+ // | foo| b| Without DidentRight
+ DidentRight = 1 << iota
+
+ // DextraSpace bit adds extra space, makes sense with DSyncWidth only.
+ // When DidentRight bit set, the space will be added to the right,
+ // otherwise to the left.
+ DextraSpace
+
+ // DSyncWidth bit enables same column width synchronization.
+ // Effective with multiple bars only.
+ DSyncWidth
+
+ // DSyncWidthR is shortcut for DSyncWidth|DidentRight
+ DSyncWidthR = DSyncWidth | DidentRight
+
+ // DSyncSpace is shortcut for DSyncWidth|DextraSpace
+ DSyncSpace = DSyncWidth | DextraSpace
+
+ // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
+ DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
+)
+
+// TimeStyle enum.
+type TimeStyle int
+
+// TimeStyle kinds.
+const (
+ ET_STYLE_GO TimeStyle = iota
+ ET_STYLE_HHMMSS
+ ET_STYLE_HHMM
+ ET_STYLE_MMSS
+)
+
+// Statistics consists of progress related statistics, that Decorator
+// may need.
+type Statistics struct {
+ ID int
+ Completed bool
+ Total int64
+ Current int64
+}
+
+// Decorator interface.
+// Implementors should embed the WC type; that way only the single method
+// Decor(*Statistics) needs to be implemented, the rest is handled by the
+// WC type.
+type Decorator interface {
+ Configurator
+ Synchronizer
+ Decor(*Statistics) string
+}
+
+// Synchronizer interface.
+// All decorators implement this interface implicitly. Its Sync
+// method exposes width sync channel, if DSyncWidth bit is set.
+type Synchronizer interface {
+ Sync() (chan int, bool)
+}
+
+// Configurator interface.
+type Configurator interface {
+ GetConf() WC
+ SetConf(WC)
+}
+
+// Wrapper interface.
+// If you're implementing custom Decorator by wrapping a built-in one,
+// it is necessary to implement this interface to retain functionality
+// of built-in Decorator.
+type Wrapper interface {
+ Base() Decorator
+}
+
+// AmountReceiver interface.
+// EWMA based decorators need to implement this one.
+type AmountReceiver interface {
+ NextAmount(int64, ...time.Duration)
+}
+
+// ShutdownListener interface.
+// If a decorator needs to be notified once upon the bar shutdown event,
+// this is the right interface to implement.
+type ShutdownListener interface {
+ Shutdown()
+}
+
+// AverageAdjuster interface.
+// Average decorators should implement this interface to provide start
+// time adjustment facility, for resume-able tasks.
+type AverageAdjuster interface {
+ AverageAdjust(time.Time)
+}
+
+// CBFunc is a convenience callback func type.
+type CBFunc func(Decorator)
+
+// Global convenience instances of WC with sync width bit set.
+var (
+ WCSyncWidth = WC{C: DSyncWidth}
+ WCSyncWidthR = WC{C: DSyncWidthR}
+ WCSyncSpace = WC{C: DSyncSpace}
+ WCSyncSpaceR = WC{C: DSyncSpaceR}
+)
+
+// WC is a struct with two public fields W and C, both of int type.
+// W represents width and C represents bit set of width related config.
+// A decorator should embed WC, to enable width synchronization.
+type WC struct {
+ W int
+ C int
+ dynFormat string
+ staticFormat string
+ wsync chan int
+}
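+
+// Illustrative sketch: a decorator with a minimum width of 12 and width
+// synchronization across bars could be configured as
+//
+//	decor.Name("queued", decor.WC{W: 12, C: decor.DSyncWidthR})
+//
+// The predefined WCSyncWidth/WCSyncSpace variants cover the common cases.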
+
+// FormatMsg formats final message according to WC.W and WC.C.
+// Should be called by any Decorator implementation.
+func (wc *WC) FormatMsg(msg string) string {
+ if (wc.C & DSyncWidth) != 0 {
+ wc.wsync <- utf8.RuneCountInString(msg)
+ max := <-wc.wsync
+ if (wc.C & DextraSpace) != 0 {
+ max++
+ }
+ return fmt.Sprintf(fmt.Sprintf(wc.dynFormat, max), msg)
+ }
+ return fmt.Sprintf(wc.staticFormat, msg)
+}
+
+// Init initializes width related config.
+func (wc *WC) Init() WC {
+ wc.dynFormat = "%%"
+ if (wc.C & DidentRight) != 0 {
+ wc.dynFormat += "-"
+ }
+ wc.dynFormat += "%ds"
+ wc.staticFormat = fmt.Sprintf(wc.dynFormat, wc.W)
+ if (wc.C & DSyncWidth) != 0 {
+ // it's deliberate choice to override wsync on each Init() call,
+ // this way globals like WCSyncSpace can be reused
+ wc.wsync = make(chan int)
+ }
+ return *wc
+}
+
+// Sync is implementation of Synchronizer interface.
+func (wc *WC) Sync() (chan int, bool) {
+ if (wc.C&DSyncWidth) != 0 && wc.wsync == nil {
+ panic(fmt.Sprintf("%T is not initialized", wc))
+ }
+ return wc.wsync, (wc.C & DSyncWidth) != 0
+}
+
+// GetConf is implementation of Configurator interface.
+func (wc *WC) GetConf() WC {
+ return *wc
+}
+
+// SetConf is implementation of Configurator interface.
+func (wc *WC) SetConf(conf WC) {
+ *wc = conf.Init()
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/doc.go b/vendor/github.com/vbauerster/mpb/v4/decor/doc.go
index 561a8677c..b595e8015 100644
--- a/vendor/github.com/vbauerster/mpb/decor/doc.go
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/doc.go
@@ -1,9 +1,5 @@
-// Copyright (C) 2016-2018 Vladimir Bauer
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
/*
- Package decor contains common decorators used by "github.com/vbauerster/mpb" package.
+ Package decor provides common decorators for "github.com/vbauerster/mpb/v4" module.
Some decorators returned by this package might have a closure state. It is ok to use
decorators concurrently, unless you share the same decorator among multiple
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go
new file mode 100644
index 000000000..ac2873143
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/elapsed.go
@@ -0,0 +1,48 @@
+package decor
+
+import (
+ "time"
+)
+
+// Elapsed decorator. It's a wrapper of NewElapsed.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `wcc` optional WC config
+func Elapsed(style TimeStyle, wcc ...WC) Decorator {
+ return NewElapsed(style, time.Now(), wcc...)
+}
+
+// NewElapsed returns elapsed time decorator.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `startTime` start time
+//
+// `wcc` optional WC config
+func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ d := &elapsedDecorator{
+ WC: wc.Init(),
+ startTime: startTime,
+ producer: chooseTimeProducer(style),
+ }
+ return d
+}
+
+type elapsedDecorator struct {
+ WC
+ startTime time.Time
+ producer func(time.Duration) string
+ msg string
+}
+
+func (d *elapsedDecorator) Decor(st *Statistics) string {
+ if !st.Completed {
+ d.msg = d.producer(time.Since(d.startTime))
+ }
+ return d.FormatMsg(d.msg)
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/eta.go b/vendor/github.com/vbauerster/mpb/v4/decor/eta.go
new file mode 100644
index 000000000..818cded17
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/eta.go
@@ -0,0 +1,212 @@
+package decor
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/VividCortex/ewma"
+)
+
+// TimeNormalizer interface. Implementors could be passed into
+// MovingAverageETA, in order to affect, i.e. normalize, its output.
+type TimeNormalizer interface {
+ Normalize(time.Duration) time.Duration
+}
+
+// TimeNormalizerFunc is a function type adapter to convert a function
+// into a TimeNormalizer.
+type TimeNormalizerFunc func(time.Duration) time.Duration
+
+func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration {
+ return f(src)
+}
+
+// EwmaETA exponential-weighted-moving-average based ETA decorator.
+// Note that it's necessary to supply bar.Incr* methods with incremental
+// work duration as second argument, in order for this decorator to
+// work correctly. This decorator is a wrapper of MovingAverageETA.
+func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
+ var average MovingAverage
+ if age == 0 {
+ average = ewma.NewMovingAverage()
+ } else {
+ average = ewma.NewMovingAverage(age)
+ }
+ return MovingAverageETA(style, average, nil, wcc...)
+}
+
+// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `average` implementation of MovingAverage interface
+//
+// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
+//
+// `wcc` optional WC config
+func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ d := &movingAverageETA{
+ WC: wc.Init(),
+ average: average,
+ normalizer: normalizer,
+ producer: chooseTimeProducer(style),
+ }
+ return d
+}
+
+type movingAverageETA struct {
+ WC
+ average ewma.MovingAverage
+ normalizer TimeNormalizer
+ producer func(time.Duration) string
+}
+
+func (d *movingAverageETA) Decor(st *Statistics) string {
+ v := math.Round(d.average.Value())
+ remaining := time.Duration((st.Total - st.Current) * int64(v))
+ if d.normalizer != nil {
+ remaining = d.normalizer.Normalize(remaining)
+ }
+ return d.FormatMsg(d.producer(remaining))
+}
+
+func (d *movingAverageETA) NextAmount(n int64, wdd ...time.Duration) {
+ var workDuration time.Duration
+ for _, wd := range wdd {
+ workDuration = wd
+ }
+ durPerItem := float64(workDuration) / float64(n)
+ if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) {
+ return
+ }
+ d.average.Add(durPerItem)
+}
+
+// AverageETA decorator. It's a wrapper of NewAverageETA.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `wcc` optional WC config
+func AverageETA(style TimeStyle, wcc ...WC) Decorator {
+ return NewAverageETA(style, time.Now(), nil, wcc...)
+}
+
+// NewAverageETA decorator with user provided start time.
+//
+// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
+//
+// `startTime` start time
+//
+// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
+//
+// `wcc` optional WC config
+func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ d := &averageETA{
+ WC: wc.Init(),
+ startTime: startTime,
+ normalizer: normalizer,
+ producer: chooseTimeProducer(style),
+ }
+ return d
+}
+
+type averageETA struct {
+ WC
+ startTime time.Time
+ normalizer TimeNormalizer
+ producer func(time.Duration) string
+}
+
+func (d *averageETA) Decor(st *Statistics) string {
+ var remaining time.Duration
+ if st.Current != 0 {
+ durPerItem := float64(time.Since(d.startTime)) / float64(st.Current)
+ durPerItem = math.Round(durPerItem)
+ remaining = time.Duration((st.Total - st.Current) * int64(durPerItem))
+ if d.normalizer != nil {
+ remaining = d.normalizer.Normalize(remaining)
+ }
+ }
+ return d.FormatMsg(d.producer(remaining))
+}
+
+func (d *averageETA) AverageAdjust(startTime time.Time) {
+ d.startTime = startTime
+}
+
+// MaxTolerateTimeNormalizer returns implementation of TimeNormalizer.
+func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer {
+ var normalized time.Duration
+ var lastCall time.Time
+ return TimeNormalizerFunc(func(remaining time.Duration) time.Duration {
+ if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < time.Minute {
+ normalized = remaining
+ lastCall = time.Now()
+ return remaining
+ }
+ normalized -= time.Since(lastCall)
+ lastCall = time.Now()
+ return normalized
+ })
+}
+
+// FixedIntervalTimeNormalizer returns implementation of TimeNormalizer.
+func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer {
+ var normalized time.Duration
+ var lastCall time.Time
+ var count int
+ return TimeNormalizerFunc(func(remaining time.Duration) time.Duration {
+ if count == 0 || remaining < time.Minute {
+ count = updInterval
+ normalized = remaining
+ lastCall = time.Now()
+ return remaining
+ }
+ count--
+ normalized -= time.Since(lastCall)
+ lastCall = time.Now()
+ return normalized
+ })
+}
+
+func chooseTimeProducer(style TimeStyle) func(time.Duration) string {
+ switch style {
+ case ET_STYLE_HHMMSS:
+ return func(remaining time.Duration) string {
+ hours := int64(remaining/time.Hour) % 60
+ minutes := int64(remaining/time.Minute) % 60
+ seconds := int64(remaining/time.Second) % 60
+ return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ }
+ case ET_STYLE_HHMM:
+ return func(remaining time.Duration) string {
+ hours := int64(remaining/time.Hour) % 60
+ minutes := int64(remaining/time.Minute) % 60
+ return fmt.Sprintf("%02d:%02d", hours, minutes)
+ }
+ case ET_STYLE_MMSS:
+ return func(remaining time.Duration) string {
+ hours := int64(remaining/time.Hour) % 60
+ minutes := int64(remaining/time.Minute) % 60
+ seconds := int64(remaining/time.Second) % 60
+ if hours > 0 {
+ return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ }
+ return fmt.Sprintf("%02d:%02d", minutes, seconds)
+ }
+ default:
+ return func(remaining time.Duration) string {
+ // strip off nanoseconds
+ return ((remaining / time.Second) * time.Second).String()
+ }
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/merge.go b/vendor/github.com/vbauerster/mpb/v4/decor/merge.go
new file mode 100644
index 000000000..fdf9e107b
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/merge.go
@@ -0,0 +1,97 @@
+package decor
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+// Merge wraps its decorator argument with intention to sync width
+// with several decorators of another bar. Visual example:
+//
+// +----+--------+---------+--------+
+// | B1 | MERGE(D, P1, Pn) |
+// +----+--------+---------+--------+
+// | B2 | D0 | D1 | Dn |
+// +----+--------+---------+--------+
+//
+func Merge(decorator Decorator, placeholders ...WC) Decorator {
+ if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 {
+ return decorator
+ }
+ md := &mergeDecorator{
+ Decorator: decorator,
+ wc: decorator.GetConf(),
+ placeHolders: make([]*placeHolderDecorator, len(placeholders)),
+ }
+ decorator.SetConf(WC{})
+ for i, wc := range placeholders {
+ if (wc.C & DSyncWidth) == 0 {
+ return decorator
+ }
+ md.placeHolders[i] = &placeHolderDecorator{
+ WC: wc.Init(),
+ wch: make(chan int),
+ }
+ }
+ return md
+}
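+
+// Illustrative sketch (assumed names p and total): to let a single decorator
+// of bar b1 span two synced columns of bar b2, wrap it with one placeholder:
+//
+//	b1 := p.AddBar(total, mpb.AppendDecorators(
+//		decor.Merge(decor.AverageETA(decor.ET_STYLE_GO, decor.WCSyncSpace), decor.WCSyncSpace),
+//	))
+//	b2 := p.AddBar(total, mpb.AppendDecorators(
+//		decor.Percentage(decor.WCSyncSpace),
+//		decor.AverageETA(decor.ET_STYLE_GO, decor.WCSyncSpace),
+//	))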
+
+type mergeDecorator struct {
+ Decorator
+ wc WC
+ placeHolders []*placeHolderDecorator
+}
+
+func (d *mergeDecorator) GetConf() WC {
+ return d.wc
+}
+
+func (d *mergeDecorator) SetConf(conf WC) {
+ d.wc = conf.Init()
+}
+
+func (d *mergeDecorator) MergeUnwrap() []Decorator {
+ decorators := make([]Decorator, len(d.placeHolders))
+ for i, ph := range d.placeHolders {
+ decorators[i] = ph
+ }
+ return decorators
+}
+
+func (d *mergeDecorator) Sync() (chan int, bool) {
+ return d.wc.Sync()
+}
+
+func (d *mergeDecorator) Base() Decorator {
+ return d.Decorator
+}
+
+func (d *mergeDecorator) Decor(st *Statistics) string {
+ msg := d.Decorator.Decor(st)
+ msgLen := utf8.RuneCountInString(msg)
+
+ var space int
+ for _, ph := range d.placeHolders {
+ space += <-ph.wch
+ }
+
+ d.wc.wsync <- msgLen - space
+
+ max := <-d.wc.wsync
+ if (d.wc.C & DextraSpace) != 0 {
+ max++
+ }
+ return fmt.Sprintf(fmt.Sprintf(d.wc.dynFormat, max+space), msg)
+}
+
+type placeHolderDecorator struct {
+ WC
+ wch chan int
+}
+
+func (d *placeHolderDecorator) Decor(st *Statistics) string {
+ go func() {
+ d.wch <- utf8.RuneCountInString(d.FormatMsg(""))
+ }()
+ return ""
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/moving-average.go b/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go
index fcd268923..933b1f2cd 100644
--- a/vendor/github.com/vbauerster/mpb/decor/moving-average.go
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/moving_average.go
@@ -9,11 +9,7 @@ import (
// MovingAverage is the interface that computes a moving average over
// a time-series stream of numbers. The average may be over a window
// or exponentially decaying.
-type MovingAverage interface {
- Add(float64)
- Value() float64
- Set(float64)
-}
+type MovingAverage = ewma.MovingAverage
type medianWindow [3]float64
@@ -42,26 +38,3 @@ func (s *medianWindow) Set(value float64) {
func NewMedian() MovingAverage {
return new(medianWindow)
}
-
-type medianEwma struct {
- count uint
- median MovingAverage
- MovingAverage
-}
-
-func (s *medianEwma) Add(v float64) {
- s.median.Add(v)
- if s.count >= 2 {
- s.MovingAverage.Add(s.median.Value())
- }
- s.count++
-}
-
-// NewMedianEwma is ewma based MovingAverage, which gets its values
-// from median MovingAverage.
-func NewMedianEwma(age ...float64) MovingAverage {
- return &medianEwma{
- MovingAverage: ewma.NewMovingAverage(age...),
- median: NewMedian(),
- }
-}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/name.go b/vendor/github.com/vbauerster/mpb/v4/decor/name.go
new file mode 100644
index 000000000..2d5865f6c
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/name.go
@@ -0,0 +1,27 @@
+package decor
+
+// Name returns name decorator.
+//
+// `name` string to display
+//
+// `wcc` optional WC config
+func Name(name string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ d := &nameDecorator{
+ WC: wc.Init(),
+ msg: name,
+ }
+ return d
+}
+
+type nameDecorator struct {
+ WC
+ msg string
+}
+
+func (d *nameDecorator) Decor(st *Statistics) string {
+ return d.FormatMsg(d.msg)
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go
new file mode 100644
index 000000000..714a0ded3
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/on_complete.go
@@ -0,0 +1,36 @@
+package decor
+
+// OnComplete returns a decorator which wraps the provided decorator, with
+// the sole purpose of displaying the provided message on the complete event.
+//
+// `decorator` Decorator to wrap
+//
+// `message` message to display on complete event
+func OnComplete(decorator Decorator, message string) Decorator {
+ d := &onCompleteWrapper{
+ Decorator: decorator,
+ msg: message,
+ }
+ if md, ok := decorator.(*mergeDecorator); ok {
+ d.Decorator, md.Decorator = md.Decorator, d
+ return md
+ }
+ return d
+}
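+
+// Illustrative sketch: replace a live decorator with a static message once
+// the bar completes, e.g.
+//
+//	mpb.AppendDecorators(decor.OnComplete(decor.Percentage(), "done"))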
+
+type onCompleteWrapper struct {
+ Decorator
+ msg string
+}
+
+func (d *onCompleteWrapper) Decor(st *Statistics) string {
+ if st.Completed {
+ wc := d.GetConf()
+ return wc.FormatMsg(d.msg)
+ }
+ return d.Decorator.Decor(st)
+}
+
+func (d *onCompleteWrapper) Base() Decorator {
+ return d.Decorator
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go
new file mode 100644
index 000000000..abf343a35
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/percentage.go
@@ -0,0 +1,72 @@
+package decor
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/vbauerster/mpb/v4/internal"
+)
+
+type percentageType float64
+
+func (s percentageType) Format(st fmt.State, verb rune) {
+ var prec int
+ switch verb {
+ case 'd':
+ case 's':
+ prec = -1
+ default:
+ if p, ok := st.Precision(); ok {
+ prec = p
+ } else {
+ prec = 6
+ }
+ }
+
+ io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
+
+ if st.Flag(' ') {
+ io.WriteString(st, " ")
+ }
+ io.WriteString(st, "%")
+}
+
+// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
+func Percentage(wcc ...WC) Decorator {
+ return NewPercentage("% d", wcc...)
+}
+
+// NewPercentage returns a percentage decorator with a custom fmt string.
+//
+// fmt examples:
+//
+// fmt="%.1f" output: "1.0%"
+// fmt="% .1f" output: "1.0 %"
+// fmt="%d" output: "1%"
+// fmt="% d" output: "1 %"
+//
+func NewPercentage(fmt string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ if fmt == "" {
+ fmt = "% d"
+ }
+ d := &percentageDecorator{
+ WC: wc.Init(),
+ fmt: fmt,
+ }
+ return d
+}
+
+type percentageDecorator struct {
+ WC
+ fmt string
+}
+
+func (d *percentageDecorator) Decor(st *Statistics) string {
+ p := internal.Percentage(st.Total, st.Current, 100)
+ return d.FormatMsg(fmt.Sprintf(d.fmt, percentageType(p)))
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v4/decor/size_type.go
new file mode 100644
index 000000000..e4b974058
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/size_type.go
@@ -0,0 +1,109 @@
+package decor
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+)
+
+//go:generate stringer -type=SizeB1024 -trimprefix=_i
+//go:generate stringer -type=SizeB1000 -trimprefix=_
+
+const (
+ _ib SizeB1024 = iota + 1
+ _iKiB SizeB1024 = 1 << (iota * 10)
+ _iMiB
+ _iGiB
+ _iTiB
+)
+
+// SizeB1024 is a named type which implements the fmt.Formatter interface.
+// It adjusts its value according to byte size multiples of 1024 and appends
+// an appropriate size marker (KiB, MiB, GiB, TiB).
+type SizeB1024 int64
+
+func (self SizeB1024) Format(st fmt.State, verb rune) {
+ var prec int
+ switch verb {
+ case 'd':
+ case 's':
+ prec = -1
+ default:
+ if p, ok := st.Precision(); ok {
+ prec = p
+ } else {
+ prec = 6
+ }
+ }
+
+ var unit SizeB1024
+ switch {
+ case self < _iKiB:
+ unit = _ib
+ case self < _iMiB:
+ unit = _iKiB
+ case self < _iGiB:
+ unit = _iMiB
+ case self < _iTiB:
+ unit = _iGiB
+ case self <= math.MaxInt64:
+ unit = _iTiB
+ }
+
+ io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
+
+ if st.Flag(' ') {
+ io.WriteString(st, " ")
+ }
+ io.WriteString(st, unit.String())
+}
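+
+// Illustrative formatting examples (the largest fitting unit is chosen):
+//
+//	fmt.Printf("%.1f", SizeB1024(2048))  // "2.0KiB"
+//	fmt.Printf("% .1f", SizeB1024(2048)) // "2.0 KiB"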
+
+const (
+ _b SizeB1000 = 1
+ _KB SizeB1000 = _b * 1000
+ _MB SizeB1000 = _KB * 1000
+ _GB SizeB1000 = _MB * 1000
+ _TB SizeB1000 = _GB * 1000
+)
+
+// SizeB1000 is a named type which implements the fmt.Formatter interface.
+// It adjusts its value according to byte size multiples of 1000 and appends
+// an appropriate size marker (KB, MB, GB, TB).
+type SizeB1000 int64
+
+func (self SizeB1000) Format(st fmt.State, verb rune) {
+ var prec int
+ switch verb {
+ case 'd':
+ case 's':
+ prec = -1
+ default:
+ if p, ok := st.Precision(); ok {
+ prec = p
+ } else {
+ prec = 6
+ }
+ }
+
+ var unit SizeB1000
+ switch {
+ case self < _KB:
+ unit = _b
+ case self < _MB:
+ unit = _KB
+ case self < _GB:
+ unit = _MB
+ case self < _TB:
+ unit = _GB
+ case self <= math.MaxInt64:
+ unit = _TB
+ }
+
+ io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
+
+ if st.Flag(' ') {
+ io.WriteString(st, " ")
+ }
+ io.WriteString(st, unit.String())
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1000_string.go b/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1000_string.go
new file mode 100644
index 000000000..3f32ef715
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1000_string.go
@@ -0,0 +1,41 @@
+// Code generated by "stringer -type=SizeB1000 -trimprefix=_"; DO NOT EDIT.
+
+package decor
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[_b-1]
+ _ = x[_KB-1000]
+ _ = x[_MB-1000000]
+ _ = x[_GB-1000000000]
+ _ = x[_TB-1000000000000]
+}
+
+const (
+ _SizeB1000_name_0 = "b"
+ _SizeB1000_name_1 = "KB"
+ _SizeB1000_name_2 = "MB"
+ _SizeB1000_name_3 = "GB"
+ _SizeB1000_name_4 = "TB"
+)
+
+func (i SizeB1000) String() string {
+ switch {
+ case i == 1:
+ return _SizeB1000_name_0
+ case i == 1000:
+ return _SizeB1000_name_1
+ case i == 1000000:
+ return _SizeB1000_name_2
+ case i == 1000000000:
+ return _SizeB1000_name_3
+ case i == 1000000000000:
+ return _SizeB1000_name_4
+ default:
+ return "SizeB1000(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1024_string.go b/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1024_string.go
new file mode 100644
index 000000000..9fca66cc7
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/sizeb1024_string.go
@@ -0,0 +1,41 @@
+// Code generated by "stringer -type=SizeB1024 -trimprefix=_i"; DO NOT EDIT.
+
+package decor
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[_ib-1]
+ _ = x[_iKiB-1024]
+ _ = x[_iMiB-1048576]
+ _ = x[_iGiB-1073741824]
+ _ = x[_iTiB-1099511627776]
+}
+
+const (
+ _SizeB1024_name_0 = "b"
+ _SizeB1024_name_1 = "KiB"
+ _SizeB1024_name_2 = "MiB"
+ _SizeB1024_name_3 = "GiB"
+ _SizeB1024_name_4 = "TiB"
+)
+
+func (i SizeB1024) String() string {
+ switch {
+ case i == 1:
+ return _SizeB1024_name_0
+ case i == 1024:
+ return _SizeB1024_name_1
+ case i == 1048576:
+ return _SizeB1024_name_2
+ case i == 1073741824:
+ return _SizeB1024_name_3
+ case i == 1099511627776:
+ return _SizeB1024_name_4
+ default:
+ return "SizeB1024(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/speed.go b/vendor/github.com/vbauerster/mpb/v4/decor/speed.go
new file mode 100644
index 000000000..795a5536f
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/speed.go
@@ -0,0 +1,175 @@
+package decor
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "time"
+
+ "github.com/VividCortex/ewma"
+)
+
+// SpeedFormatter is a wrapper for SizeB1024 and SizeB1000 to format a value as speed per second.
+type SpeedFormatter struct {
+ fmt.Formatter
+}
+
+func (self *SpeedFormatter) Format(st fmt.State, verb rune) {
+ self.Formatter.Format(st, verb)
+ io.WriteString(st, "/s")
+}
+
+// EwmaSpeed exponential-weighted-moving-average based speed decorator.
+// Note that it's necessary to supply bar.Incr* methods with incremental
+// work duration as second argument, in order for this decorator to
+// work correctly. This decorator is a wrapper of MovingAverageSpeed.
+func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
+ var average MovingAverage
+ if age == 0 {
+ average = ewma.NewMovingAverage()
+ } else {
+ average = ewma.NewMovingAverage(age)
+ }
+ return MovingAverageSpeed(unit, format, average, wcc...)
+}
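+
+// Illustrative sketch: a typical configuration is
+//
+//	mpb.AppendDecorators(decor.EwmaSpeed(decor.UnitKiB, "% .2f", 60))
+//
+// where each bar.Incr* call is then supplied with time.Since(start) of the
+// processed chunk (see the IncrInt64 docs in the mpb package).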
+
+// MovingAverageSpeed decorator relies on MovingAverage implementation
+// to calculate its average.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `format` printf compatible verb for value, like "%f" or "%d"
+//
+// `average` MovingAverage implementation
+//
+// `wcc` optional WC config
+//
+// format examples:
+//
+// unit=UnitKiB, format="%.1f" output: "1.0MiB/s"
+// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
+// unit=UnitKB, format="%.1f" output: "1.0MB/s"
+// unit=UnitKB, format="% .1f" output: "1.0 MB/s"
+//
+func MovingAverageSpeed(unit int, format string, average MovingAverage, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ if format == "" {
+ format = "%.0f"
+ }
+ d := &movingAverageSpeed{
+ WC: wc.Init(),
+ average: average,
+ producer: chooseSpeedProducer(unit, format),
+ }
+ return d
+}
+
+type movingAverageSpeed struct {
+ WC
+ producer func(float64) string
+ average ewma.MovingAverage
+ msg string
+}
+
+func (d *movingAverageSpeed) Decor(st *Statistics) string {
+ if !st.Completed {
+ var speed float64
+ if v := d.average.Value(); v > 0 {
+ speed = 1 / v
+ }
+ d.msg = d.producer(speed * 1e9)
+ }
+ return d.FormatMsg(d.msg)
+}
+
+func (d *movingAverageSpeed) NextAmount(n int64, wdd ...time.Duration) {
+ var workDuration time.Duration
+ for _, wd := range wdd {
+ workDuration = wd
+ }
+ durPerByte := float64(workDuration) / float64(n)
+ if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) {
+ return
+ }
+ d.average.Add(durPerByte)
+}
+
+// AverageSpeed decorator with dynamic unit measure adjustment. It's
+// a wrapper of NewAverageSpeed.
+func AverageSpeed(unit int, format string, wcc ...WC) Decorator {
+ return NewAverageSpeed(unit, format, time.Now(), wcc...)
+}
+
+// NewAverageSpeed decorator with dynamic unit measure adjustment and
+// user provided start time.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `format` printf compatible verb for value, like "%f" or "%d"
+//
+// `startTime` start time
+//
+// `wcc` optional WC config
+//
+// format examples:
+//
+// unit=UnitKiB, format="%.1f" output: "1.0MiB/s"
+// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
+// unit=UnitKB, format="%.1f" output: "1.0MB/s"
+// unit=UnitKB, format="% .1f" output: "1.0 MB/s"
+//
+func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ if format == "" {
+ format = "%.0f"
+ }
+ d := &averageSpeed{
+ WC: wc.Init(),
+ startTime: startTime,
+ producer: chooseSpeedProducer(unit, format),
+ }
+ return d
+}
+
+type averageSpeed struct {
+ WC
+ startTime time.Time
+ producer func(float64) string
+ msg string
+}
+
+func (d *averageSpeed) Decor(st *Statistics) string {
+ if !st.Completed {
+ speed := float64(st.Current) / float64(time.Since(d.startTime))
+ d.msg = d.producer(speed * 1e9)
+ }
+
+ return d.FormatMsg(d.msg)
+}
+
+func (d *averageSpeed) AverageAdjust(startTime time.Time) {
+ d.startTime = startTime
+}
+
+func chooseSpeedProducer(unit int, format string) func(float64) string {
+ switch unit {
+ case UnitKiB:
+ return func(speed float64) string {
+ return fmt.Sprintf(format, &SpeedFormatter{SizeB1024(math.Round(speed))})
+ }
+ case UnitKB:
+ return func(speed float64) string {
+ return fmt.Sprintf(format, &SpeedFormatter{SizeB1000(math.Round(speed))})
+ }
+ default:
+ return func(speed float64) string {
+ return fmt.Sprintf(format, speed)
+ }
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go
new file mode 100644
index 000000000..24f553142
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/decor/spinner.go
@@ -0,0 +1,35 @@
+package decor
+
+var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
+
+// Spinner returns spinner decorator.
+//
+// `frames` spinner frames, if nil or len==0, default is used
+//
+// `wcc` optional WC config
+func Spinner(frames []string, wcc ...WC) Decorator {
+ var wc WC
+ for _, widthConf := range wcc {
+ wc = widthConf
+ }
+ if len(frames) == 0 {
+ frames = defaultSpinnerStyle
+ }
+ d := &spinnerDecorator{
+ WC: wc.Init(),
+ frames: frames,
+ }
+ return d
+}
+
+type spinnerDecorator struct {
+ WC
+ frames []string
+ count uint
+}
+
+func (d *spinnerDecorator) Decor(st *Statistics) string {
+ frame := d.frames[d.count%uint(len(d.frames))]
+ d.count++
+ return d.FormatMsg(frame)
+}
diff --git a/vendor/github.com/vbauerster/mpb/v4/doc.go b/vendor/github.com/vbauerster/mpb/v4/doc.go
new file mode 100644
index 000000000..5ada71774
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/doc.go
@@ -0,0 +1,2 @@
+// Package mpb is a library for rendering progress bars in terminal applications.
+package mpb
diff --git a/vendor/github.com/vbauerster/mpb/v4/go.mod b/vendor/github.com/vbauerster/mpb/v4/go.mod
new file mode 100644
index 000000000..0c5ce51f1
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/go.mod
@@ -0,0 +1,9 @@
+module github.com/vbauerster/mpb/v4
+
+require (
+ github.com/VividCortex/ewma v1.1.1
+ golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
+ golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 // indirect
+)
+
+go 1.13
diff --git a/vendor/github.com/vbauerster/mpb/v4/go.sum b/vendor/github.com/vbauerster/mpb/v4/go.sum
new file mode 100644
index 000000000..94a9f1a28
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/go.sum
@@ -0,0 +1,11 @@
+github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
+github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
+golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/vbauerster/mpb/v4/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v4/internal/percentage.go
new file mode 100644
index 000000000..7e261cb22
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/internal/percentage.go
@@ -0,0 +1,15 @@
+package internal
+
+import "math"
+
+// Percentage is a helper function to calculate percentage.
+func Percentage(total, current int64, width int) float64 {
+ if total <= 0 {
+ return 0
+ }
+ return float64(int64(width)*current) / float64(total)
+}
+
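+// PercentageRound is like Percentage, but rounds the result with math.Round.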
+func PercentageRound(total, current int64, width int) float64 {
+ return math.Round(Percentage(total, current, width))
+}
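+
+// Illustrative arithmetic: Percentage(200, 50, 100) returns 25.0, while
+// Percentage(200, 50, 80) returns 20.0 (the value is scaled to width).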
diff --git a/vendor/github.com/vbauerster/mpb/v4/options.go b/vendor/github.com/vbauerster/mpb/v4/options.go
new file mode 100644
index 000000000..6b34fb340
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/options.go
@@ -0,0 +1,105 @@
+package mpb
+
+import (
+ "io"
+ "io/ioutil"
+ "sync"
+ "time"
+)
+
+// ContainerOption is a function option which changes the default
+// behavior of progress container, if passed to mpb.New(...ContainerOption).
+type ContainerOption func(*pState)
+
+// WithWaitGroup provides a means to have a single join point. If a
+// *sync.WaitGroup is provided, you can safely call just p.Wait()
+// without calling Wait() on the provided *sync.WaitGroup. Makes sense
+// when there is more than one bar to render.
+func WithWaitGroup(wg *sync.WaitGroup) ContainerOption {
+ return func(s *pState) {
+ s.uwg = wg
+ }
+}
+
+// WithWidth sets container width. Default is 80. Bars inherit this
+// width, as long as no BarWidth is applied.
+func WithWidth(w int) ContainerOption {
+ return func(s *pState) {
+ if w < 0 {
+ return
+ }
+ s.width = w
+ }
+}
+
+// WithRefreshRate overrides default 120ms refresh rate.
+func WithRefreshRate(d time.Duration) ContainerOption {
+ return func(s *pState) {
+ s.rr = d
+ }
+}
+
+// WithManualRefresh disables the internal auto refresh time.Ticker.
+// Refresh will occur upon receiving a value from the provided ch.
+func WithManualRefresh(ch <-chan time.Time) ContainerOption {
+ return func(s *pState) {
+ s.refreshSrc = ch
+ }
+}
+
+// WithRenderDelay delays rendering. By default rendering starts as
+// soon as a bar is added; with this option it's possible to delay the
+// rendering process by keeping the provided chan unclosed. In other
+// words, rendering will start as soon as the provided chan is closed.
+func WithRenderDelay(ch <-chan struct{}) ContainerOption {
+ return func(s *pState) {
+ s.renderDelay = ch
+ }
+}
+
+// WithShutdownNotifier sets a shutdown notifier: the provided channel
+// will be closed after all bars have been rendered.
+func WithShutdownNotifier(ch chan struct{}) ContainerOption {
+ return func(s *pState) {
+ s.shutdownNotifier = ch
+ }
+}
+
+// WithOutput overrides default os.Stdout output. Setting it to nil
+// will effectively disable auto refresh rate and discard any output,
+// useful if you want to disable progress bars with little overhead.
+func WithOutput(w io.Writer) ContainerOption {
+ return func(s *pState) {
+ if w == nil {
+ s.refreshSrc = make(chan time.Time)
+ s.output = ioutil.Discard
+ return
+ }
+ s.output = w
+ }
+}
+
+// WithDebugOutput sets debug output.
+func WithDebugOutput(w io.Writer) ContainerOption {
+ if w == nil {
+ return nil
+ }
+ return func(s *pState) {
+ s.debugOut = w
+ }
+}
+
+// PopCompletedMode will pop and stop rendering completed bars.
+func PopCompletedMode() ContainerOption {
+ return func(s *pState) {
+ s.popCompleted = true
+ }
+}
+
+// ContainerOptOnCond returns option when condition evaluates to true.
+func ContainerOptOnCond(option ContainerOption, condition func() bool) ContainerOption {
+ if condition() {
+ return option
+ }
+ return nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/priority_queue.go b/vendor/github.com/vbauerster/mpb/v4/priority_queue.go
index 7bc588c29..29d9bd5a8 100644
--- a/vendor/github.com/vbauerster/mpb/priority_queue.go
+++ b/vendor/github.com/vbauerster/mpb/v4/priority_queue.go
@@ -1,7 +1,5 @@
package mpb
-import "container/heap"
-
// A priorityQueue implements heap.Interface
type priorityQueue []*Bar
@@ -18,23 +16,17 @@ func (pq priorityQueue) Swap(i, j int) {
}
func (pq *priorityQueue) Push(x interface{}) {
- n := len(*pq)
+ s := *pq
bar := x.(*Bar)
- bar.index = n
- *pq = append(*pq, bar)
+ bar.index = len(s)
+ s = append(s, bar)
+ *pq = s
}
func (pq *priorityQueue) Pop() interface{} {
- old := *pq
- n := len(old)
- bar := old[n-1]
+ s := *pq
+ *pq = s[0 : len(s)-1]
+ bar := s[len(s)-1]
bar.index = -1 // for safety
- *pq = old[0 : n-1]
return bar
}
-
-// update modifies the priority of a Bar in the queue.
-func (pq *priorityQueue) update(bar *Bar, priority int) {
- bar.priority = priority
- heap.Fix(pq, bar.index)
-}
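
Push and Pop here are only heap.Interface plumbing: callers always go through container/heap, and the removed update helper is replaced by an inline heap.Fix in (*Progress).setBarPriority later in this patch. A standalone sketch of the same pattern, using hypothetical types rather than the library's:

package main

import (
	"container/heap"
	"fmt"
)

// item mimics how mpb keeps a heap index on each element.
type item struct {
	priority int
	index    int
}

type pq []*item

func (q pq) Len() int            { return len(q) }
func (q pq) Less(i, j int) bool  { return q[i].priority < q[j].priority }
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i]; q[i].index, q[j].index = i, j }
func (q *pq) Push(x interface{}) { it := x.(*item); it.index = len(*q); *q = append(*q, it) }
func (q *pq) Pop() interface{} {
	old := *q
	it := old[len(old)-1]
	it.index = -1 // for safety, mirroring the vendored code
	*q = old[:len(old)-1]
	return it
}

func main() {
	q := pq{}
	a := &item{priority: 2}
	heap.Push(&q, a) // always via container/heap, never q.Push directly
	heap.Push(&q, &item{priority: 1})
	a.priority = 0
	heap.Fix(&q, a.index)                      // re-establish heap order after a priority change
	fmt.Println(heap.Pop(&q).(*item).priority) // prints 0
}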
diff --git a/vendor/github.com/vbauerster/mpb/v4/progress.go b/vendor/github.com/vbauerster/mpb/v4/progress.go
new file mode 100644
index 000000000..1150d50bd
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/progress.go
@@ -0,0 +1,394 @@
+package mpb
+
+import (
+ "bytes"
+ "container/heap"
+ "context"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/vbauerster/mpb/v4/cwriter"
+ "github.com/vbauerster/mpb/v4/decor"
+)
+
+const (
+ // default RefreshRate
+ prr = 120 * time.Millisecond
+ // default width
+ pwidth = 80
+)
+
+// Progress represents the container that renders Progress bars
+type Progress struct {
+ ctx context.Context
+ uwg *sync.WaitGroup
+ cwg *sync.WaitGroup
+ bwg *sync.WaitGroup
+ operateState chan func(*pState)
+ done chan struct{}
+ refreshCh chan time.Time
+ once sync.Once
+ dlogger *log.Logger
+}
+
+type pState struct {
+ bHeap priorityQueue
+ heapUpdated bool
+ pMatrix map[int][]chan int
+ aMatrix map[int][]chan int
+ barShutdownQueue []*Bar
+ barPopQueue []*Bar
+
+ // the following are provided/overridden by the user
+ idCount int
+ width int
+ popCompleted bool
+ rr time.Duration
+ uwg *sync.WaitGroup
+ refreshSrc <-chan time.Time
+ renderDelay <-chan struct{}
+ shutdownNotifier chan struct{}
+ parkedBars map[*Bar]*Bar
+ output io.Writer
+ debugOut io.Writer
+}
+
+// New creates a new Progress container instance. It's not possible to
+// reuse the instance after the *Progress.Wait() method has been called.
+func New(options ...ContainerOption) *Progress {
+ return NewWithContext(context.Background(), options...)
+}
+
+// NewWithContext creates a new Progress container instance with the
+// provided context. It's not possible to reuse the instance after the
+// *Progress.Wait() method has been called.
+func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
+ s := &pState{
+ bHeap: priorityQueue{},
+ width: pwidth,
+ rr: prr,
+ parkedBars: make(map[*Bar]*Bar),
+ output: os.Stdout,
+ debugOut: ioutil.Discard,
+ }
+
+ for _, opt := range options {
+ if opt != nil {
+ opt(s)
+ }
+ }
+
+ p := &Progress{
+ ctx: ctx,
+ uwg: s.uwg,
+ cwg: new(sync.WaitGroup),
+ bwg: new(sync.WaitGroup),
+ operateState: make(chan func(*pState)),
+ done: make(chan struct{}),
+ dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile),
+ }
+
+ p.cwg.Add(1)
+ go p.serve(s, cwriter.New(s.output))
+ return p
+}
+
+// AddBar creates a new progress bar and adds it to the container.
+func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
+ return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...)
+}
+
+// AddSpinner creates a new spinner bar and adds it to the container.
+func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {
+ return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...)
+}
+
+// Add creates a bar which renders itself using the provided filler.
+// Set total to 0 if you plan to update it later.
+func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {
+ if filler == nil {
+ filler = NewBarFiller(DefaultBarStyle, false)
+ }
+ p.bwg.Add(1)
+ result := make(chan *Bar)
+ select {
+ case p.operateState <- func(ps *pState) {
+ bs := ps.makeBarState(total, filler, options...)
+ bar := newBar(p, bs)
+ if bs.runningBar != nil {
+ bs.runningBar.noPop = true
+ ps.parkedBars[bs.runningBar] = bar
+ } else {
+ heap.Push(&ps.bHeap, bar)
+ ps.heapUpdated = true
+ }
+ ps.idCount++
+ result <- bar
+ }:
+ bar := <-result
+ bar.subscribeDecorators()
+ return bar
+ case <-p.done:
+ p.bwg.Done()
+ return nil
+ }
+}
+
+func (p *Progress) dropBar(b *Bar) {
+ select {
+ case p.operateState <- func(s *pState) {
+ if b.index < 0 {
+ return
+ }
+ heap.Remove(&s.bHeap, b.index)
+ s.heapUpdated = true
+ }:
+ case <-p.done:
+ }
+}
+
+func (p *Progress) setBarPriority(b *Bar, priority int) {
+ select {
+ case p.operateState <- func(s *pState) {
+ if b.index < 0 {
+ return
+ }
+ b.priority = priority
+ heap.Fix(&s.bHeap, b.index)
+ }:
+ case <-p.done:
+ }
+}
+
+// UpdateBarPriority is the same as *Bar.SetPriority.
+func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
+ p.setBarPriority(b, priority)
+}
+
+// BarCount returns bars count
+func (p *Progress) BarCount() int {
+ result := make(chan int, 1)
+ select {
+ case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
+ return <-result
+ case <-p.done:
+ return 0
+ }
+}
+
+// Wait waits for all bars to complete and finally shuts down the container.
+// After this method has been called, there is no way to reuse the *Progress
+// instance.
+func (p *Progress) Wait() {
+ if p.uwg != nil {
+ // wait for user wg
+ p.uwg.Wait()
+ }
+
+ // wait for bars to quit, if any
+ p.bwg.Wait()
+
+ p.once.Do(p.shutdown)
+
+ // wait for container to quit
+ p.cwg.Wait()
+}
+
+func (p *Progress) shutdown() {
+ close(p.done)
+}
+
+func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
+ defer p.cwg.Done()
+
+ p.refreshCh = s.newTicker(p.done)
+
+ for {
+ select {
+ case op := <-p.operateState:
+ op(s)
+ case <-p.refreshCh:
+ if err := s.render(cw); err != nil {
+ go p.dlogger.Println(err)
+ }
+ case <-s.shutdownNotifier:
+ return
+ }
+ }
+}
+
+func (s *pState) render(cw *cwriter.Writer) error {
+ if s.heapUpdated {
+ s.updateSyncMatrix()
+ s.heapUpdated = false
+ }
+ syncWidth(s.pMatrix)
+ syncWidth(s.aMatrix)
+
+ tw, err := cw.GetWidth()
+ if err != nil {
+ tw = s.width
+ }
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := s.bHeap[i]
+ go bar.render(tw)
+ }
+
+ return s.flush(cw)
+}
+
+func (s *pState) flush(cw *cwriter.Writer) error {
+ var lineCount int
+ bm := make(map[*Bar]struct{}, s.bHeap.Len())
+ for s.bHeap.Len() > 0 {
+ b := heap.Pop(&s.bHeap).(*Bar)
+ cw.ReadFrom(<-b.frameCh)
+ if b.toShutdown {
+ // shutdown at next flush
+ // this ensures no bar ends up with less than 100% rendered
+ defer func() {
+ s.barShutdownQueue = append(s.barShutdownQueue, b)
+ }()
+ }
+ lineCount += b.extendedLines + 1
+ bm[b] = struct{}{}
+ }
+
+ for _, b := range s.barShutdownQueue {
+ if parkedBar := s.parkedBars[b]; parkedBar != nil {
+ parkedBar.priority = b.priority
+ heap.Push(&s.bHeap, parkedBar)
+ delete(s.parkedBars, b)
+ b.toDrop = true
+ }
+ if b.toDrop {
+ delete(bm, b)
+ s.heapUpdated = true
+ } else if s.popCompleted {
+ if b := b; !b.noPop {
+ defer func() {
+ s.barPopQueue = append(s.barPopQueue, b)
+ }()
+ }
+ }
+ b.cancel()
+ }
+ s.barShutdownQueue = s.barShutdownQueue[0:0]
+
+ for _, b := range s.barPopQueue {
+ delete(bm, b)
+ s.heapUpdated = true
+ lineCount -= b.extendedLines + 1
+ }
+ s.barPopQueue = s.barPopQueue[0:0]
+
+ for b := range bm {
+ heap.Push(&s.bHeap, b)
+ }
+
+ return cw.Flush(lineCount)
+}
+
+func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
+ ch := make(chan time.Time)
+ if s.shutdownNotifier == nil {
+ s.shutdownNotifier = make(chan struct{})
+ }
+ go func() {
+ if s.renderDelay != nil {
+ <-s.renderDelay
+ }
+ if s.refreshSrc == nil {
+ ticker := time.NewTicker(s.rr)
+ defer ticker.Stop()
+ s.refreshSrc = ticker.C
+ }
+ for {
+ select {
+ case tick := <-s.refreshSrc:
+ ch <- tick
+ case <-done:
+ close(s.shutdownNotifier)
+ return
+ }
+ }
+ }()
+ return ch
+}
+
+func (s *pState) updateSyncMatrix() {
+ s.pMatrix = make(map[int][]chan int)
+ s.aMatrix = make(map[int][]chan int)
+ for i := 0; i < s.bHeap.Len(); i++ {
+ bar := s.bHeap[i]
+ table := bar.wSyncTable()
+ pRow, aRow := table[0], table[1]
+
+ for i, ch := range pRow {
+ s.pMatrix[i] = append(s.pMatrix[i], ch)
+ }
+
+ for i, ch := range aRow {
+ s.aMatrix[i] = append(s.aMatrix[i], ch)
+ }
+ }
+}
+
+func (s *pState) makeBarState(total int64, filler Filler, options ...BarOption) *bState {
+ bs := &bState{
+ total: total,
+ baseF: extractBaseFiller(filler),
+ filler: filler,
+ priority: s.idCount,
+ id: s.idCount,
+ width: s.width,
+ debugOut: s.debugOut,
+ extender: func(r io.Reader, _ int, _ *decor.Statistics) (io.Reader, int) {
+ return r, 0
+ },
+ }
+
+ for _, opt := range options {
+ if opt != nil {
+ opt(bs)
+ }
+ }
+
+ if s.popCompleted && !bs.noPop {
+ bs.priority = -1
+ }
+
+ bs.bufP = bytes.NewBuffer(make([]byte, 0, bs.width))
+ bs.bufB = bytes.NewBuffer(make([]byte, 0, bs.width))
+ bs.bufA = bytes.NewBuffer(make([]byte, 0, bs.width))
+
+ return bs
+}
+
+func syncWidth(matrix map[int][]chan int) {
+ for _, column := range matrix {
+ column := column
+ go func() {
+ var maxWidth int
+ for _, ch := range column {
+ if w := <-ch; w > maxWidth {
+ maxWidth = w
+ }
+ }
+ for _, ch := range column {
+ ch <- maxWidth
+ }
+ }()
+ }
+}
+
+func extractBaseFiller(f Filler) Filler {
+ if f, ok := f.(Wrapper); ok {
+ return extractBaseFiller(f.Base())
+ }
+ return f
+}
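
From the caller's side the flow is: Add* sends a closure into operateState, the returned *Bar is driven by increments, and Wait drains the user wait group, the bar wait group, and finally the container goroutine. A minimal sketch assuming the vendored v4 API above; the total and the sleep are illustrative stand-ins for real work.

package main

import (
	"time"

	"github.com/vbauerster/mpb/v4"
)

func main() {
	p := mpb.New()       // defaults: os.Stdout, 80 columns, 120ms refresh
	bar := p.AddBar(100) // Add sends a closure into p.operateState and waits for the *Bar
	for i := 0; i < 100; i++ {
		time.Sleep(2 * time.Millisecond) // stand-in for real work
		bar.IncrBy(1)                    // the same increment call the proxy reader uses
	}
	p.Wait() // bwg drains once the bar completes, then the container shuts down
}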
diff --git a/vendor/github.com/vbauerster/mpb/v4/proxyreader.go b/vendor/github.com/vbauerster/mpb/v4/proxyreader.go
new file mode 100644
index 000000000..736142412
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v4/proxyreader.go
@@ -0,0 +1,45 @@
+package mpb
+
+import (
+ "io"
+ "time"
+)
+
+type proxyReader struct {
+ io.ReadCloser
+ bar *Bar
+ iT time.Time
+}
+
+func (prox *proxyReader) Read(p []byte) (n int, err error) {
+ n, err = prox.ReadCloser.Read(p)
+ if n > 0 {
+ prox.bar.IncrBy(n, time.Since(prox.iT))
+ prox.iT = time.Now()
+ }
+ if err == io.EOF {
+ go func() {
+ prox.bar.SetTotal(0, true)
+ }()
+ }
+ return
+}
+
+type proxyWriterTo struct {
+ *proxyReader
+ wt io.WriterTo
+}
+
+func (prox *proxyWriterTo) WriteTo(w io.Writer) (n int64, err error) {
+ n, err = prox.wt.WriteTo(w)
+ if n > 0 {
+ prox.bar.IncrInt64(n, time.Since(prox.iT))
+ prox.iT = time.Now()
+ }
+ if err == io.EOF {
+ go func() {
+ prox.bar.SetTotal(0, true)
+ }()
+ }
+ return
+}
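
proxyReader and proxyWriterTo are the unexported wrappers behind Bar.ProxyReader (defined in bar.go elsewhere in this vendored package, not in this hunk), which is how containers/image drives its blob-copy progress bars. A sketch assuming that method; the string reader stands in for a real blob stream.

package main

import (
	"io"
	"io/ioutil"
	"strings"

	"github.com/vbauerster/mpb/v4"
)

func main() {
	blob := strings.NewReader("pretend this is an image layer")
	p := mpb.New()
	bar := p.AddBar(blob.Size())
	body := bar.ProxyReader(blob) // wraps blob in the proxy reader above
	defer body.Close()
	_, _ = io.Copy(ioutil.Discard, body) // each chunk read advances the bar; EOF completes it
	p.Wait()
}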
diff --git a/vendor/github.com/vbauerster/mpb/spinner_filler.go b/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go
index 36299fef0..9f383fb33 100644
--- a/vendor/github.com/vbauerster/mpb/spinner_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v4/spinner_filler.go
@@ -5,7 +5,7 @@ import (
"strings"
"unicode/utf8"
- "github.com/vbauerster/mpb/decor"
+ "github.com/vbauerster/mpb/v4/decor"
)
// SpinnerAlignment enum.
@@ -18,7 +18,8 @@ const (
SpinnerOnRight
)
-var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
+// DefaultSpinnerStyle is applied when a bar is constructed with the *Progress.AddSpinner method.
+var DefaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
type spinnerFiller struct {
frames []string
@@ -26,6 +27,18 @@ type spinnerFiller struct {
alignment SpinnerAlignment
}
+// NewSpinnerFiller constructs an mpb.Filler, to be used with the *Progress.Add method.
+func NewSpinnerFiller(style []string, alignment SpinnerAlignment) Filler {
+ if len(style) == 0 {
+ style = DefaultSpinnerStyle
+ }
+ filler := &spinnerFiller{
+ frames: style,
+ alignment: alignment,
+ }
+ return filler
+}
+
func (s *spinnerFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
frame := s.frames[s.count%uint(len(s.frames))]
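
NewSpinnerFiller is what *Progress.AddSpinner uses under the hood. Below is a sketch of an indeterminate spinner, assuming the SpinnerOnLeft alignment constant from the unchanged part of this file; the sleep is a stand-in for work of unknown length.

package main

import (
	"time"

	"github.com/vbauerster/mpb/v4"
)

func main() {
	p := mpb.New()
	// A total of 0 keeps the spinner running until the size of the work is known.
	spinner := p.AddSpinner(0, mpb.SpinnerOnLeft)
	time.Sleep(300 * time.Millisecond) // stand-in for work of unknown length
	spinner.SetTotal(0, true)          // mark complete so p.Wait() can return
	p.Wait()
}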
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
new file mode 100644
index 000000000..c7f8c7e64
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -0,0 +1,222 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// In Go 1.13, the ed25519 package was promoted to the standard library as
+// crypto/ed25519, and this package became a wrapper for the standard library one.
+//
+// +build !go1.13
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// https://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
+package ed25519
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+import (
+ "bytes"
+ "crypto"
+ cryptorand "crypto/rand"
+ "crypto/sha512"
+ "errors"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/ed25519/internal/edwards25519"
+)
+
+const (
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+type PublicKey []byte
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, priv[32:])
+ return PublicKey(publicKey)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+ seed := make([]byte, SeedSize)
+ copy(seed, priv[:32])
+ return seed
+}
+
+// Sign signs the given message with priv.
+// Ed25519 performs two passes over messages to be signed and therefore cannot
+// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
+// indicate the message hasn't been hashed. This can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+ if opts.HashFunc() != crypto.Hash(0) {
+ return nil, errors.New("ed25519: cannot sign hashed message")
+ }
+
+ return Sign(priv, message), nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+ if rand == nil {
+ rand = cryptorand.Reader
+ }
+
+ seed := make([]byte, SeedSize)
+ if _, err := io.ReadFull(rand, seed); err != nil {
+ return nil, nil, err
+ }
+
+ privateKey := NewKeyFromSeed(seed)
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, privateKey[32:])
+
+ return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ if l := len(seed); l != SeedSize {
+ panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+
+ digest := sha512.Sum512(seed)
+ digest[0] &= 248
+ digest[31] &= 127
+ digest[31] |= 64
+
+ var A edwards25519.ExtendedGroupElement
+ var hBytes [32]byte
+ copy(hBytes[:], digest[:])
+ edwards25519.GeScalarMultBase(&A, &hBytes)
+ var publicKeyBytes [32]byte
+ A.ToBytes(&publicKeyBytes)
+
+ privateKey := make([]byte, PrivateKeySize)
+ copy(privateKey, seed)
+ copy(privateKey[32:], publicKeyBytes[:])
+
+ return privateKey
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ if l := len(privateKey); l != PrivateKeySize {
+ panic("ed25519: bad private key length: " + strconv.Itoa(l))
+ }
+
+ h := sha512.New()
+ h.Write(privateKey[:32])
+
+ var digest1, messageDigest, hramDigest [64]byte
+ var expandedSecretKey [32]byte
+ h.Sum(digest1[:0])
+ copy(expandedSecretKey[:], digest1[:])
+ expandedSecretKey[0] &= 248
+ expandedSecretKey[31] &= 63
+ expandedSecretKey[31] |= 64
+
+ h.Reset()
+ h.Write(digest1[32:])
+ h.Write(message)
+ h.Sum(messageDigest[:0])
+
+ var messageDigestReduced [32]byte
+ edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
+ var R edwards25519.ExtendedGroupElement
+ edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
+
+ var encodedR [32]byte
+ R.ToBytes(&encodedR)
+
+ h.Reset()
+ h.Write(encodedR[:])
+ h.Write(privateKey[32:])
+ h.Write(message)
+ h.Sum(hramDigest[:0])
+ var hramDigestReduced [32]byte
+ edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
+
+ var s [32]byte
+ edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
+
+ signature := make([]byte, SignatureSize)
+ copy(signature[:], encodedR[:])
+ copy(signature[32:], s[:])
+
+ return signature
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+ if l := len(publicKey); l != PublicKeySize {
+ panic("ed25519: bad public key length: " + strconv.Itoa(l))
+ }
+
+ if len(sig) != SignatureSize || sig[63]&224 != 0 {
+ return false
+ }
+
+ var A edwards25519.ExtendedGroupElement
+ var publicKeyBytes [32]byte
+ copy(publicKeyBytes[:], publicKey)
+ if !A.FromBytes(&publicKeyBytes) {
+ return false
+ }
+ edwards25519.FeNeg(&A.X, &A.X)
+ edwards25519.FeNeg(&A.T, &A.T)
+
+ h := sha512.New()
+ h.Write(sig[:32])
+ h.Write(publicKey[:])
+ h.Write(message)
+ var digest [64]byte
+ h.Sum(digest[:0])
+
+ var hReduced [32]byte
+ edwards25519.ScReduce(&hReduced, &digest)
+
+ var R edwards25519.ProjectiveGroupElement
+ var s [32]byte
+ copy(s[:], sig[32:])
+
+ // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+ // the range [0, order) in order to prevent signature malleability.
+ if !edwards25519.ScMinimal(&s) {
+ return false
+ }
+
+ edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
+
+ var checkR [32]byte
+ R.ToBytes(&checkR)
+ return bytes.Equal(sig[:32], checkR[:])
+}
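
Both build variants of this vendored package (the pre-Go-1.13 implementation above and the go1.13 wrapper that follows) expose the same GenerateKey/Sign/Verify surface. A short sketch of that API; the message text is an arbitrary example.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader) // passing nil also defaults to crypto/rand
	if err != nil {
		panic(err)
	}
	msg := []byte("signed image manifest")
	sig := ed25519.Sign(priv, msg)                     // Ed25519 hashes internally; no pre-hashing
	fmt.Println(ed25519.Verify(pub, msg, sig))         // true
	fmt.Println(ed25519.Verify(pub, []byte("x"), sig)) // false
}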
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
new file mode 100644
index 000000000..d1448d8d2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// https://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
+//
+// Beginning with Go 1.13, the functionality of this package was moved to the
+// standard library as crypto/ed25519. This package only acts as a compatibility
+// wrapper.
+package ed25519
+
+import (
+ "crypto/ed25519"
+ "io"
+)
+
+const (
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+//
+// This type is an alias for crypto/ed25519's PublicKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PublicKey = ed25519.PublicKey
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+//
+// This type is an alias for crypto/ed25519's PrivateKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PrivateKey = ed25519.PrivateKey
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+ return ed25519.GenerateKey(rand)
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ return ed25519.NewKeyFromSeed(seed)
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ return ed25519.Sign(privateKey, message)
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+ return ed25519.Verify(publicKey, message, sig)
+}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
new file mode 100644
index 000000000..e39f086c1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
@@ -0,0 +1,1422 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// These values are from the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// d is a constant in the Edwards curve equation.
+var d = FieldElement{
+ -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
+}
+
+// d2 is 2*d.
+var d2 = FieldElement{
+ -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
+}
+
+// SqrtM1 is the square-root of -1 in the field.
+var SqrtM1 = FieldElement{
+ -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
+}
+
+// A is a constant in the Montgomery-form of curve25519.
+var A = FieldElement{
+ 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+}
+
+// bi contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var bi = [8]PreComputedGroupElement{
+ {
+ FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+ FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+ FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+ },
+ {
+ FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+ FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+ FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+ },
+ {
+ FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+ FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+ FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+ },
+ {
+ FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+ FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+ FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+ },
+ {
+ FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
+ FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
+ FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
+ },
+ {
+ FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
+ FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
+ FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
+ },
+ {
+ FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
+ FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
+ FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
+ },
+ {
+ FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
+ FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
+ FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
+ },
+}
+
+// base contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var base = [32][8]PreComputedGroupElement{
+ {
+ {
+ FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+ FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+ FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+ },
+ {
+ FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
+ FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
+ FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
+ },
+ {
+ FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+ FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+ FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+ },
+ {
+ FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
+ FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
+ FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
+ },
+ {
+ FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+ FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+ FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+ },
+ {
+ FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
+ FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
+ FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
+ },
+ {
+ FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+ FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+ FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+ },
+ {
+ FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
+ FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
+ FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
+ },
+ },
+ {
+ {
+ FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
+ FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
+ FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
+ },
+ {
+ FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
+ FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
+ FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
+ },
+ {
+ FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
+ FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
+ FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
+ },
+ {
+ FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
+ FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
+ FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
+ },
+ {
+ FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
+ FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
+ FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
+ },
+ {
+ FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
+ FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
+ FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
+ },
+ {
+ FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
+ FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
+ FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
+ },
+ {
+ FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
+ FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
+ FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
+ },
+ },
+ {
+ {
+ FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
+ FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
+ FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
+ },
+ {
+ FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
+ FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
+ FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
+ },
+ {
+ FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
+ FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
+ FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
+ },
+ {
+ FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
+ FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
+ FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
+ },
+ {
+ FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
+ FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
+ FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
+ },
+ {
+ FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
+ FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
+ FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
+ },
+ {
+ FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
+ FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
+ FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
+ },
+ {
+ FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
+ FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
+ FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
+ },
+ },
+ {
+ {
+ FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
+ FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
+ FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
+ },
+ {
+ FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
+ FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
+ FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
+ },
+ {
+ FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
+ FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
+ FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
+ },
+ {
+ FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
+ FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
+ FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
+ },
+ {
+ FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
+ FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
+ FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
+ },
+ {
+ FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
+ FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
+ FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
+ },
+ {
+ FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
+ FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
+ FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
+ },
+ {
+ FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
+ FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
+ FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
+ },
+ },
+ {
+ {
+ FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
+ FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
+ FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
+ },
+ {
+ FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
+ FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
+ FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
+ },
+ {
+ FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
+ FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
+ FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
+ },
+ {
+ FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
+ FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
+ FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
+ },
+ {
+ FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
+ FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
+ FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
+ },
+ {
+ FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
+ FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
+ FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
+ },
+ {
+ FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
+ FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
+ FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
+ },
+ {
+ FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
+ FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
+ FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
+ },
+ },
+ {
+ {
+ FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
+ FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
+ FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
+ },
+ {
+ FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
+ FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
+ FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
+ },
+ {
+ FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
+ FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
+ FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
+ },
+ {
+ FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
+ FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
+ FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
+ },
+ {
+ FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
+ FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
+ FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
+ },
+ {
+ FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
+ FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
+ FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
+ },
+ {
+ FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
+ FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
+ FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
+ },
+ {
+ FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
+ FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
+ FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
+ },
+ },
+ {
+ {
+ FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
+ FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
+ FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
+ },
+ {
+ FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
+ FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
+ FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
+ },
+ {
+ FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
+ FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
+ FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
+ },
+ {
+ FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
+ FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
+ FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
+ },
+ {
+ FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
+ FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
+ FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
+ },
+ {
+ FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
+ FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
+ FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
+ },
+ {
+ FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
+ FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
+ FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
+ },
+ {
+ FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
+ FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
+ FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
+ },
+ },
+ {
+ {
+ FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
+ FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
+ FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
+ },
+ {
+ FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
+ FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
+ FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
+ },
+ {
+ FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
+ FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
+ FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
+ },
+ {
+ FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
+ FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
+ FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
+ },
+ {
+ FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
+ FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
+ FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
+ },
+ {
+ FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
+ FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
+ FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
+ },
+ {
+ FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
+ FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
+ FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
+ },
+ {
+ FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
+ FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
+ FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
+ },
+ },
+ {
+ {
+ FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
+ FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
+ FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
+ },
+ {
+ FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
+ FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
+ FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
+ },
+ {
+ FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
+ FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
+ FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
+ },
+ {
+ FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
+ FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
+ FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
+ },
+ {
+ FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
+ FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
+ FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
+ },
+ {
+ FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
+ FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
+ FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
+ },
+ {
+ FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
+ FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
+ FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
+ },
+ {
+ FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
+ FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
+ FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
+ },
+ },
+ {
+ {
+ FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
+ FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
+ FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
+ },
+ {
+ FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
+ FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
+ FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
+ },
+ {
+ FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
+ FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
+ FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
+ },
+ {
+ FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
+ FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
+ FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
+ },
+ {
+ FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
+ FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
+ FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
+ },
+ {
+ FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
+ FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
+ FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
+ },
+ {
+ FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
+ FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
+ FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
+ },
+ {
+ FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
+ FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
+ FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
+ },
+ },
+ {
+ {
+ FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
+ FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
+ FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
+ },
+ {
+ FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
+ FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
+ FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
+ },
+ {
+ FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
+ FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
+ FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
+ },
+ {
+ FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
+ FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
+ FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
+ },
+ {
+ FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
+ FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
+ FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
+ },
+ {
+ FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
+ FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
+ FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
+ },
+ {
+ FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
+ FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
+ FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
+ },
+ {
+ FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
+ FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
+ FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
+ },
+ },
+ {
+ {
+ FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
+ FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
+ FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
+ },
+ {
+ FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
+ FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
+ FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
+ },
+ {
+ FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
+ FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
+ FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
+ },
+ {
+ FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
+ FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
+ FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
+ },
+ {
+ FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
+ FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
+ FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
+ },
+ {
+ FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
+ FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
+ FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
+ },
+ {
+ FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
+ FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
+ FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
+ },
+ {
+ FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
+ FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
+ FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
+ },
+ },
+ {
+ {
+ FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
+ FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
+ FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
+ },
+ {
+ FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
+ FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
+ FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
+ },
+ {
+ FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
+ FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
+ FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
+ },
+ {
+ FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
+ FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
+ FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
+ },
+ {
+ FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
+ FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
+ FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
+ },
+ {
+ FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
+ FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
+ FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
+ },
+ {
+ FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
+ FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
+ FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
+ },
+ {
+ FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
+ FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
+ FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
+ },
+ },
+ {
+ {
+ FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
+ FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
+ FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
+ },
+ {
+ FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
+ FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
+ FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
+ },
+ {
+ FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
+ FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
+ FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
+ },
+ {
+ FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
+ FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
+ FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
+ },
+ {
+ FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
+ FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
+ FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
+ },
+ {
+ FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
+ FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
+ FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
+ },
+ {
+ FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
+ FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
+ FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
+ },
+ {
+ FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
+ FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
+ FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
+ },
+ },
+ {
+ {
+ FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
+ FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
+ FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
+ },
+ {
+ FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
+ FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
+ FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
+ },
+ {
+ FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
+ FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
+ FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
+ },
+ {
+ FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
+ FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
+ FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
+ },
+ {
+ FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
+ FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
+ FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
+ },
+ {
+ FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
+ FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
+ FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
+ },
+ {
+ FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
+ FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
+ FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
+ },
+ {
+ FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
+ FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
+ FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
+ },
+ },
+ {
+ {
+ FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
+ FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
+ FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
+ },
+ {
+ FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
+ FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
+ FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
+ },
+ {
+ FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
+ FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
+ FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
+ },
+ {
+ FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
+ FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
+ FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
+ },
+ {
+ FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
+ FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
+ FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
+ },
+ {
+ FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
+ FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
+ FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
+ },
+ {
+ FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
+ FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
+ FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
+ },
+ {
+ FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
+ FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
+ FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
+ },
+ },
+ {
+ {
+ FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
+ FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
+ FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
+ },
+ {
+ FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
+ FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
+ FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
+ },
+ {
+ FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
+ FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
+ FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
+ },
+ {
+ FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
+ FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
+ FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
+ },
+ {
+ FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
+ FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
+ FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
+ },
+ {
+ FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
+ FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
+ FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
+ },
+ {
+ FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
+ FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
+ FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
+ },
+ {
+ FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
+ FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
+ FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
+ },
+ },
+ {
+ {
+ FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
+ FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
+ FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
+ },
+ {
+ FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
+ FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
+ FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
+ },
+ {
+ FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
+ FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
+ FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
+ },
+ {
+ FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
+ FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
+ FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
+ },
+ {
+ FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
+ FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
+ FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
+ },
+ {
+ FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
+ FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
+ FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
+ },
+ {
+ FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
+ FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
+ FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
+ },
+ {
+ FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
+ FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
+ FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
+ },
+ },
+ {
+ {
+ FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
+ FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
+ FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
+ },
+ {
+ FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
+ FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
+ FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
+ },
+ {
+ FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
+ FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
+ FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
+ },
+ {
+ FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
+ FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
+ FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
+ },
+ {
+ FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325},
+ FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593},
+ FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404},
+ },
+ {
+ FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644},
+ FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801},
+ FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804},
+ },
+ {
+ FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884},
+ FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577},
+ FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849},
+ },
+ {
+ FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473},
+ FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644},
+ FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319},
+ },
+ },
+ {
+ {
+ FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599},
+ FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768},
+ FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084},
+ },
+ {
+ FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328},
+ FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369},
+ FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920},
+ },
+ {
+ FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815},
+ FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025},
+ FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397},
+ },
+ {
+ FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448},
+ FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981},
+ FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165},
+ },
+ {
+ FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501},
+ FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073},
+ FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861},
+ },
+ {
+ FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845},
+ FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211},
+ FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870},
+ },
+ {
+ FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096},
+ FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803},
+ FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168},
+ },
+ {
+ FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965},
+ FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505},
+ FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598},
+ },
+ },
+ {
+ {
+ FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782},
+ FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900},
+ FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479},
+ },
+ {
+ FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208},
+ FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232},
+ FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719},
+ },
+ {
+ FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271},
+ FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326},
+ FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132},
+ },
+ {
+ FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300},
+ FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570},
+ FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670},
+ },
+ {
+ FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994},
+ FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913},
+ FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317},
+ },
+ {
+ FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730},
+ FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096},
+ FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078},
+ },
+ {
+ FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411},
+ FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905},
+ FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654},
+ },
+ {
+ FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870},
+ FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498},
+ FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579},
+ },
+ },
+ {
+ {
+ FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677},
+ FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647},
+ FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743},
+ },
+ {
+ FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468},
+ FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375},
+ FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155},
+ },
+ {
+ FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725},
+ FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612},
+ FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943},
+ },
+ {
+ FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944},
+ FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928},
+ FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406},
+ },
+ {
+ FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139},
+ FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963},
+ FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693},
+ },
+ {
+ FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734},
+ FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680},
+ FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410},
+ },
+ {
+ FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931},
+ FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654},
+ FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710},
+ },
+ {
+ FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180},
+ FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684},
+ FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895},
+ },
+ },
+ {
+ {
+ FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501},
+ FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413},
+ FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880},
+ },
+ {
+ FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874},
+ FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962},
+ FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899},
+ },
+ {
+ FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152},
+ FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063},
+ FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080},
+ },
+ {
+ FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146},
+ FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183},
+ FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133},
+ },
+ {
+ FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421},
+ FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622},
+ FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197},
+ },
+ {
+ FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663},
+ FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753},
+ FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755},
+ },
+ {
+ FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862},
+ FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118},
+ FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171},
+ },
+ {
+ FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380},
+ FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824},
+ FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270},
+ },
+ },
+ {
+ {
+ FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438},
+ FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584},
+ FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562},
+ },
+ {
+ FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471},
+ FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610},
+ FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269},
+ },
+ {
+ FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650},
+ FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369},
+ FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461},
+ },
+ {
+ FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462},
+ FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793},
+ FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218},
+ },
+ {
+ FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226},
+ FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019},
+ FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037},
+ },
+ {
+ FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171},
+ FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132},
+ FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841},
+ },
+ {
+ FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181},
+ FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210},
+ FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040},
+ },
+ {
+ FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935},
+ FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105},
+ FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814},
+ },
+ },
+ {
+ {
+ FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852},
+ FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581},
+ FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646},
+ },
+ {
+ FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844},
+ FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025},
+ FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453},
+ },
+ {
+ FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068},
+ FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192},
+ FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921},
+ },
+ {
+ FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259},
+ FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426},
+ FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072},
+ },
+ {
+ FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305},
+ FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832},
+ FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943},
+ },
+ {
+ FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011},
+ FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447},
+ FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494},
+ },
+ {
+ FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245},
+ FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859},
+ FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915},
+ },
+ {
+ FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707},
+ FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848},
+ FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224},
+ },
+ },
+ {
+ {
+ FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391},
+ FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215},
+ FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101},
+ },
+ {
+ FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713},
+ FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849},
+ FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930},
+ },
+ {
+ FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940},
+ FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031},
+ FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404},
+ },
+ {
+ FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243},
+ FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116},
+ FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525},
+ },
+ {
+ FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509},
+ FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883},
+ FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865},
+ },
+ {
+ FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660},
+ FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273},
+ FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138},
+ },
+ {
+ FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560},
+ FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135},
+ FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941},
+ },
+ {
+ FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739},
+ FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756},
+ FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819},
+ },
+ },
+ {
+ {
+ FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347},
+ FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028},
+ FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075},
+ },
+ {
+ FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799},
+ FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609},
+ FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817},
+ },
+ {
+ FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989},
+ FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523},
+ FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278},
+ },
+ {
+ FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045},
+ FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377},
+ FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480},
+ },
+ {
+ FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016},
+ FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426},
+ FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525},
+ },
+ {
+ FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396},
+ FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080},
+ FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892},
+ },
+ {
+ FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275},
+ FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074},
+ FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140},
+ },
+ {
+ FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717},
+ FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101},
+ FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127},
+ },
+ },
+ {
+ {
+ FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632},
+ FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415},
+ FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160},
+ },
+ {
+ FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876},
+ FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625},
+ FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478},
+ },
+ {
+ FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164},
+ FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595},
+ FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248},
+ },
+ {
+ FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858},
+ FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193},
+ FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184},
+ },
+ {
+ FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942},
+ FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635},
+ FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948},
+ },
+ {
+ FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935},
+ FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415},
+ FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416},
+ },
+ {
+ FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018},
+ FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778},
+ FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659},
+ },
+ {
+ FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385},
+ FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503},
+ FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329},
+ },
+ },
+ {
+ {
+ FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056},
+ FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838},
+ FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948},
+ },
+ {
+ FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691},
+ FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118},
+ FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517},
+ },
+ {
+ FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269},
+ FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904},
+ FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589},
+ },
+ {
+ FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193},
+ FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910},
+ FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930},
+ },
+ {
+ FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667},
+ FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481},
+ FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876},
+ },
+ {
+ FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640},
+ FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278},
+ FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112},
+ },
+ {
+ FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272},
+ FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012},
+ FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221},
+ },
+ {
+ FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046},
+ FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345},
+ FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310},
+ },
+ },
+ {
+ {
+ FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937},
+ FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636},
+ FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008},
+ },
+ {
+ FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429},
+ FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576},
+ FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066},
+ },
+ {
+ FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490},
+ FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104},
+ FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053},
+ },
+ {
+ FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275},
+ FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511},
+ FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095},
+ },
+ {
+ FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439},
+ FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939},
+ FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424},
+ },
+ {
+ FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310},
+ FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608},
+ FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079},
+ },
+ {
+ FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101},
+ FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418},
+ FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576},
+ },
+ {
+ FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356},
+ FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996},
+ FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099},
+ },
+ },
+ {
+ {
+ FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728},
+ FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658},
+ FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242},
+ },
+ {
+ FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001},
+ FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766},
+ FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373},
+ },
+ {
+ FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458},
+ FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628},
+ FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657},
+ },
+ {
+ FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062},
+ FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616},
+ FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014},
+ },
+ {
+ FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383},
+ FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814},
+ FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718},
+ },
+ {
+ FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417},
+ FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222},
+ FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444},
+ },
+ {
+ FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597},
+ FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970},
+ FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799},
+ },
+ {
+ FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647},
+ FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511},
+ FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032},
+ },
+ },
+ {
+ {
+ FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834},
+ FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461},
+ FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062},
+ },
+ {
+ FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516},
+ FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547},
+ FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240},
+ },
+ {
+ FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038},
+ FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741},
+ FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103},
+ },
+ {
+ FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747},
+ FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323},
+ FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016},
+ },
+ {
+ FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373},
+ FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228},
+ FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141},
+ },
+ {
+ FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399},
+ FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831},
+ FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376},
+ },
+ {
+ FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313},
+ FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958},
+ FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577},
+ },
+ {
+ FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743},
+ FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684},
+ FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476},
+ },
+ },
+}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
new file mode 100644
index 000000000..fd03c252a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -0,0 +1,1793 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "encoding/binary"
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// FieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type FieldElement [10]int32
+
+var zero FieldElement
+
+func FeZero(fe *FieldElement) {
+ copy(fe[:], zero[:])
+}
+
+func FeOne(fe *FieldElement) {
+ FeZero(fe)
+ fe[0] = 1
+}
+
+func FeAdd(dst, a, b *FieldElement) {
+ dst[0] = a[0] + b[0]
+ dst[1] = a[1] + b[1]
+ dst[2] = a[2] + b[2]
+ dst[3] = a[3] + b[3]
+ dst[4] = a[4] + b[4]
+ dst[5] = a[5] + b[5]
+ dst[6] = a[6] + b[6]
+ dst[7] = a[7] + b[7]
+ dst[8] = a[8] + b[8]
+ dst[9] = a[9] + b[9]
+}
+
+func FeSub(dst, a, b *FieldElement) {
+ dst[0] = a[0] - b[0]
+ dst[1] = a[1] - b[1]
+ dst[2] = a[2] - b[2]
+ dst[3] = a[3] - b[3]
+ dst[4] = a[4] - b[4]
+ dst[5] = a[5] - b[5]
+ dst[6] = a[6] - b[6]
+ dst[7] = a[7] - b[7]
+ dst[8] = a[8] - b[8]
+ dst[9] = a[9] - b[9]
+}
+
+func FeCopy(dst, src *FieldElement) {
+ copy(dst[:], src[:])
+}
+
+// Replace (f,g) with (g,g) if b == 1;
+// replace (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func FeCMove(f, g *FieldElement, b int32) {
+ b = -b
+ f[0] ^= b & (f[0] ^ g[0])
+ f[1] ^= b & (f[1] ^ g[1])
+ f[2] ^= b & (f[2] ^ g[2])
+ f[3] ^= b & (f[3] ^ g[3])
+ f[4] ^= b & (f[4] ^ g[4])
+ f[5] ^= b & (f[5] ^ g[5])
+ f[6] ^= b & (f[6] ^ g[6])
+ f[7] ^= b & (f[7] ^ g[7])
+ f[8] ^= b & (f[8] ^ g[8])
+ f[9] ^= b & (f[9] ^ g[9])
+}
+
+func load3(in []byte) int64 {
+ var r int64
+ r = int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ return r
+}
+
+func load4(in []byte) int64 {
+ var r int64
+ r = int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ r |= int64(in[3]) << 24
+ return r
+}
+
+func FeFromBytes(dst *FieldElement, src *[32]byte) {
+ h0 := load4(src[:])
+ h1 := load3(src[4:]) << 6
+ h2 := load3(src[7:]) << 5
+ h3 := load3(src[10:]) << 3
+ h4 := load3(src[13:]) << 2
+ h5 := load4(src[16:])
+ h6 := load3(src[20:]) << 7
+ h7 := load3(src[23:]) << 5
+ h8 := load3(src[26:]) << 4
+ h9 := (load3(src[29:]) & 8388607) << 2
+
+ FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+// FeToBytes marshals h to s.
+// Preconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+// Then 0<y<1.
+//
+// Write r=h-pq.
+// Have 0<=r<=p-1=2^255-20.
+// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+// Write x=r+19(2^-255)r+y.
+// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func FeToBytes(s *[32]byte, h *FieldElement) {
+ var carry [10]int32
+
+ q := (19*h[9] + (1 << 24)) >> 25
+ q = (h[0] + q) >> 26
+ q = (h[1] + q) >> 25
+ q = (h[2] + q) >> 26
+ q = (h[3] + q) >> 25
+ q = (h[4] + q) >> 26
+ q = (h[5] + q) >> 25
+ q = (h[6] + q) >> 26
+ q = (h[7] + q) >> 25
+ q = (h[8] + q) >> 26
+ q = (h[9] + q) >> 25
+
+ // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+ h[0] += 19 * q
+ // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+ carry[0] = h[0] >> 26
+ h[1] += carry[0]
+ h[0] -= carry[0] << 26
+ carry[1] = h[1] >> 25
+ h[2] += carry[1]
+ h[1] -= carry[1] << 25
+ carry[2] = h[2] >> 26
+ h[3] += carry[2]
+ h[2] -= carry[2] << 26
+ carry[3] = h[3] >> 25
+ h[4] += carry[3]
+ h[3] -= carry[3] << 25
+ carry[4] = h[4] >> 26
+ h[5] += carry[4]
+ h[4] -= carry[4] << 26
+ carry[5] = h[5] >> 25
+ h[6] += carry[5]
+ h[5] -= carry[5] << 25
+ carry[6] = h[6] >> 26
+ h[7] += carry[6]
+ h[6] -= carry[6] << 26
+ carry[7] = h[7] >> 25
+ h[8] += carry[7]
+ h[7] -= carry[7] << 25
+ carry[8] = h[8] >> 26
+ h[9] += carry[8]
+ h[8] -= carry[8] << 26
+ carry[9] = h[9] >> 25
+ h[9] -= carry[9] << 25
+ // h10 = carry9
+
+ // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+ // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+ // evidently 2^255 h10-2^255 q = 0.
+ // Goal: Output h[0]+...+2^230 h[9].
+
+ s[0] = byte(h[0] >> 0)
+ s[1] = byte(h[0] >> 8)
+ s[2] = byte(h[0] >> 16)
+ s[3] = byte((h[0] >> 24) | (h[1] << 2))
+ s[4] = byte(h[1] >> 6)
+ s[5] = byte(h[1] >> 14)
+ s[6] = byte((h[1] >> 22) | (h[2] << 3))
+ s[7] = byte(h[2] >> 5)
+ s[8] = byte(h[2] >> 13)
+ s[9] = byte((h[2] >> 21) | (h[3] << 5))
+ s[10] = byte(h[3] >> 3)
+ s[11] = byte(h[3] >> 11)
+ s[12] = byte((h[3] >> 19) | (h[4] << 6))
+ s[13] = byte(h[4] >> 2)
+ s[14] = byte(h[4] >> 10)
+ s[15] = byte(h[4] >> 18)
+ s[16] = byte(h[5] >> 0)
+ s[17] = byte(h[5] >> 8)
+ s[18] = byte(h[5] >> 16)
+ s[19] = byte((h[5] >> 24) | (h[6] << 1))
+ s[20] = byte(h[6] >> 7)
+ s[21] = byte(h[6] >> 15)
+ s[22] = byte((h[6] >> 23) | (h[7] << 3))
+ s[23] = byte(h[7] >> 5)
+ s[24] = byte(h[7] >> 13)
+ s[25] = byte((h[7] >> 21) | (h[8] << 4))
+ s[26] = byte(h[8] >> 4)
+ s[27] = byte(h[8] >> 12)
+ s[28] = byte((h[8] >> 20) | (h[9] << 6))
+ s[29] = byte(h[9] >> 2)
+ s[30] = byte(h[9] >> 10)
+ s[31] = byte(h[9] >> 18)
+}
+
+func FeIsNegative(f *FieldElement) byte {
+ var s [32]byte
+ FeToBytes(&s, f)
+ return s[0] & 1
+}
+
+func FeIsNonZero(f *FieldElement) int32 {
+ var s [32]byte
+ FeToBytes(&s, f)
+ var x uint8
+ for _, b := range s {
+ x |= b
+ }
+ x |= x >> 4
+ x |= x >> 2
+ x |= x >> 1
+ return int32(x & 1)
+}
+
+// FeNeg sets h = -f
+//
+// Preconditions:
+// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func FeNeg(h, f *FieldElement) {
+ h[0] = -f[0]
+ h[1] = -f[1]
+ h[2] = -f[2]
+ h[3] = -f[3]
+ h[4] = -f[4]
+ h[5] = -f[5]
+ h[6] = -f[6]
+ h[7] = -f[7]
+ h[8] = -f[8]
+ h[9] = -f[9]
+}
+
+func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
+ var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
+
+ /*
+ |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+ i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+ |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+ i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+ */
+
+ c0 = (h0 + (1 << 25)) >> 26
+ h1 += c0
+ h0 -= c0 << 26
+ c4 = (h4 + (1 << 25)) >> 26
+ h5 += c4
+ h4 -= c4 << 26
+ /* |h0| <= 2^25 */
+ /* |h4| <= 2^25 */
+ /* |h1| <= 1.51*2^58 */
+ /* |h5| <= 1.51*2^58 */
+
+ c1 = (h1 + (1 << 24)) >> 25
+ h2 += c1
+ h1 -= c1 << 25
+ c5 = (h5 + (1 << 24)) >> 25
+ h6 += c5
+ h5 -= c5 << 25
+ /* |h1| <= 2^24; from now on fits into int32 */
+ /* |h5| <= 2^24; from now on fits into int32 */
+ /* |h2| <= 1.21*2^59 */
+ /* |h6| <= 1.21*2^59 */
+
+ c2 = (h2 + (1 << 25)) >> 26
+ h3 += c2
+ h2 -= c2 << 26
+ c6 = (h6 + (1 << 25)) >> 26
+ h7 += c6
+ h6 -= c6 << 26
+ /* |h2| <= 2^25; from now on fits into int32 unchanged */
+ /* |h6| <= 2^25; from now on fits into int32 unchanged */
+ /* |h3| <= 1.51*2^58 */
+ /* |h7| <= 1.51*2^58 */
+
+ c3 = (h3 + (1 << 24)) >> 25
+ h4 += c3
+ h3 -= c3 << 25
+ c7 = (h7 + (1 << 24)) >> 25
+ h8 += c7
+ h7 -= c7 << 25
+ /* |h3| <= 2^24; from now on fits into int32 unchanged */
+ /* |h7| <= 2^24; from now on fits into int32 unchanged */
+ /* |h4| <= 1.52*2^33 */
+ /* |h8| <= 1.52*2^33 */
+
+ c4 = (h4 + (1 << 25)) >> 26
+ h5 += c4
+ h4 -= c4 << 26
+ c8 = (h8 + (1 << 25)) >> 26
+ h9 += c8
+ h8 -= c8 << 26
+ /* |h4| <= 2^25; from now on fits into int32 unchanged */
+ /* |h8| <= 2^25; from now on fits into int32 unchanged */
+ /* |h5| <= 1.01*2^24 */
+ /* |h9| <= 1.51*2^58 */
+
+ c9 = (h9 + (1 << 24)) >> 25
+ h0 += c9 * 19
+ h9 -= c9 << 25
+ /* |h9| <= 2^24; from now on fits into int32 unchanged */
+ /* |h0| <= 1.8*2^37 */
+
+ c0 = (h0 + (1 << 25)) >> 26
+ h1 += c0
+ h0 -= c0 << 26
+ /* |h0| <= 2^25; from now on fits into int32 unchanged */
+ /* |h1| <= 1.01*2^24 */
+
+ h[0] = int32(h0)
+ h[1] = int32(h1)
+ h[2] = int32(h2)
+ h[3] = int32(h3)
+ h[4] = int32(h4)
+ h[5] = int32(h5)
+ h[6] = int32(h6)
+ h[7] = int32(h7)
+ h[8] = int32(h8)
+ h[9] = int32(h9)
+}
+
+// FeMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on inputs, can squeeze carries into int32.
+func FeMul(h, f, g *FieldElement) {
+ f0 := int64(f[0])
+ f1 := int64(f[1])
+ f2 := int64(f[2])
+ f3 := int64(f[3])
+ f4 := int64(f[4])
+ f5 := int64(f[5])
+ f6 := int64(f[6])
+ f7 := int64(f[7])
+ f8 := int64(f[8])
+ f9 := int64(f[9])
+
+ f1_2 := int64(2 * f[1])
+ f3_2 := int64(2 * f[3])
+ f5_2 := int64(2 * f[5])
+ f7_2 := int64(2 * f[7])
+ f9_2 := int64(2 * f[9])
+
+ g0 := int64(g[0])
+ g1 := int64(g[1])
+ g2 := int64(g[2])
+ g3 := int64(g[3])
+ g4 := int64(g[4])
+ g5 := int64(g[5])
+ g6 := int64(g[6])
+ g7 := int64(g[7])
+ g8 := int64(g[8])
+ g9 := int64(g[9])
+
+ g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
+ g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
+ g3_19 := int64(19 * g[3])
+ g4_19 := int64(19 * g[4])
+ g5_19 := int64(19 * g[5])
+ g6_19 := int64(19 * g[6])
+ g7_19 := int64(19 * g[7])
+ g8_19 := int64(19 * g[8])
+ g9_19 := int64(19 * g[9])
+
+ h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
+ h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
+ h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
+ h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
+ h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
+ h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
+ h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
+ h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
+ h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
+ h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
+
+ FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
+ f0 := int64(f[0])
+ f1 := int64(f[1])
+ f2 := int64(f[2])
+ f3 := int64(f[3])
+ f4 := int64(f[4])
+ f5 := int64(f[5])
+ f6 := int64(f[6])
+ f7 := int64(f[7])
+ f8 := int64(f[8])
+ f9 := int64(f[9])
+ f0_2 := int64(2 * f[0])
+ f1_2 := int64(2 * f[1])
+ f2_2 := int64(2 * f[2])
+ f3_2 := int64(2 * f[3])
+ f4_2 := int64(2 * f[4])
+ f5_2 := int64(2 * f[5])
+ f6_2 := int64(2 * f[6])
+ f7_2 := int64(2 * f[7])
+ f5_38 := 38 * f5 // 1.31*2^30
+ f6_19 := 19 * f6 // 1.31*2^30
+ f7_38 := 38 * f7 // 1.31*2^30
+ f8_19 := 19 * f8 // 1.31*2^30
+ f9_38 := 38 * f9 // 1.31*2^30
+
+ h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
+ h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
+ h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
+ h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
+ h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
+ h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
+ h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
+ h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
+ h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
+ h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
+
+ return
+}
+
+// FeSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func FeSquare(h, f *FieldElement) {
+ h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
+ FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+// FeSquare2 sets h = 2 * f * f
+//
+// Can overlap h with f.
+//
+// Preconditions:
+// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+// See fe_mul.c for discussion of implementation strategy.
+func FeSquare2(h, f *FieldElement) {
+ h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
+
+ h0 += h0
+ h1 += h1
+ h2 += h2
+ h3 += h3
+ h4 += h4
+ h5 += h5
+ h6 += h6
+ h7 += h7
+ h8 += h8
+ h9 += h9
+
+ FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+func FeInvert(out, z *FieldElement) {
+ var t0, t1, t2, t3 FieldElement
+ var i int
+
+ FeSquare(&t0, z) // 2^1
+ FeSquare(&t1, &t0) // 2^2
+ for i = 1; i < 2; i++ { // 2^3
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t1, z, &t1) // 2^3 + 2^0
+ FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0
+ FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1
+ FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0
+ FeSquare(&t2, &t1) // 5,4,3,2,1
+ for i = 1; i < 5; i++ { // 9,8,7,6,5
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
+ FeSquare(&t2, &t1) // 10..1
+ for i = 1; i < 10; i++ { // 19..10
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t2, &t2, &t1) // 19..0
+ FeSquare(&t3, &t2) // 20..1
+ for i = 1; i < 20; i++ { // 39..20
+ FeSquare(&t3, &t3)
+ }
+ FeMul(&t2, &t3, &t2) // 39..0
+ FeSquare(&t2, &t2) // 40..1
+ for i = 1; i < 10; i++ { // 49..10
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t1, &t2, &t1) // 49..0
+ FeSquare(&t2, &t1) // 50..1
+ for i = 1; i < 50; i++ { // 99..50
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t2, &t2, &t1) // 99..0
+ FeSquare(&t3, &t2) // 100..1
+ for i = 1; i < 100; i++ { // 199..100
+ FeSquare(&t3, &t3)
+ }
+ FeMul(&t2, &t3, &t2) // 199..0
+ FeSquare(&t2, &t2) // 200..1
+ for i = 1; i < 50; i++ { // 249..50
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t1, &t2, &t1) // 249..0
+ FeSquare(&t1, &t1) // 250..1
+ for i = 1; i < 5; i++ { // 254..5
+ FeSquare(&t1, &t1)
+ }
+ FeMul(out, &t1, &t0) // 254..5,3,1,0
+}
+
+func fePow22523(out, z *FieldElement) {
+ var t0, t1, t2 FieldElement
+ var i int
+
+ FeSquare(&t0, z)
+ for i = 1; i < 1; i++ {
+ FeSquare(&t0, &t0)
+ }
+ FeSquare(&t1, &t0)
+ for i = 1; i < 2; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t1, z, &t1)
+ FeMul(&t0, &t0, &t1)
+ FeSquare(&t0, &t0)
+ for i = 1; i < 1; i++ {
+ FeSquare(&t0, &t0)
+ }
+ FeMul(&t0, &t1, &t0)
+ FeSquare(&t1, &t0)
+ for i = 1; i < 5; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t0, &t1, &t0)
+ FeSquare(&t1, &t0)
+ for i = 1; i < 10; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t1, &t1, &t0)
+ FeSquare(&t2, &t1)
+ for i = 1; i < 20; i++ {
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t1, &t2, &t1)
+ FeSquare(&t1, &t1)
+ for i = 1; i < 10; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t0, &t1, &t0)
+ FeSquare(&t1, &t0)
+ for i = 1; i < 50; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t1, &t1, &t0)
+ FeSquare(&t2, &t1)
+ for i = 1; i < 100; i++ {
+ FeSquare(&t2, &t2)
+ }
+ FeMul(&t1, &t2, &t1)
+ FeSquare(&t1, &t1)
+ for i = 1; i < 50; i++ {
+ FeSquare(&t1, &t1)
+ }
+ FeMul(&t0, &t1, &t0)
+ FeSquare(&t0, &t0)
+ for i = 1; i < 2; i++ {
+ FeSquare(&t0, &t0)
+ }
+ FeMul(out, &t0, z)
+}
+
+// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 *
+// y^2 where d = -121665/121666.
+//
+// Several representations are used:
+// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z
+// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
+// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
+// PreComputedGroupElement: (y+x,y-x,2dxy)
+
+type ProjectiveGroupElement struct {
+ X, Y, Z FieldElement
+}
+
+type ExtendedGroupElement struct {
+ X, Y, Z, T FieldElement
+}
+
+type CompletedGroupElement struct {
+ X, Y, Z, T FieldElement
+}
+
+type PreComputedGroupElement struct {
+ yPlusX, yMinusX, xy2d FieldElement
+}
+
+type CachedGroupElement struct {
+ yPlusX, yMinusX, Z, T2d FieldElement
+}
+
+func (p *ProjectiveGroupElement) Zero() {
+ FeZero(&p.X)
+ FeOne(&p.Y)
+ FeOne(&p.Z)
+}
+
+func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) {
+ var t0 FieldElement
+
+ FeSquare(&r.X, &p.X)
+ FeSquare(&r.Z, &p.Y)
+ FeSquare2(&r.T, &p.Z)
+ FeAdd(&r.Y, &p.X, &p.Y)
+ FeSquare(&t0, &r.Y)
+ FeAdd(&r.Y, &r.Z, &r.X)
+ FeSub(&r.Z, &r.Z, &r.X)
+ FeSub(&r.X, &t0, &r.Y)
+ FeSub(&r.T, &r.T, &r.Z)
+}
+
+func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) {
+ var recip, x, y FieldElement
+
+ FeInvert(&recip, &p.Z)
+ FeMul(&x, &p.X, &recip)
+ FeMul(&y, &p.Y, &recip)
+ FeToBytes(s, &y)
+ s[31] ^= FeIsNegative(&x) << 7
+}
+
+func (p *ExtendedGroupElement) Zero() {
+ FeZero(&p.X)
+ FeOne(&p.Y)
+ FeOne(&p.Z)
+ FeZero(&p.T)
+}
+
+func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) {
+ var q ProjectiveGroupElement
+ p.ToProjective(&q)
+ q.Double(r)
+}
+
+func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) {
+ FeAdd(&r.yPlusX, &p.Y, &p.X)
+ FeSub(&r.yMinusX, &p.Y, &p.X)
+ FeCopy(&r.Z, &p.Z)
+ FeMul(&r.T2d, &p.T, &d2)
+}
+
+func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) {
+ FeCopy(&r.X, &p.X)
+ FeCopy(&r.Y, &p.Y)
+ FeCopy(&r.Z, &p.Z)
+}
+
+func (p *ExtendedGroupElement) ToBytes(s *[32]byte) {
+ var recip, x, y FieldElement
+
+ FeInvert(&recip, &p.Z)
+ FeMul(&x, &p.X, &recip)
+ FeMul(&y, &p.Y, &recip)
+ FeToBytes(s, &y)
+ s[31] ^= FeIsNegative(&x) << 7
+}
+
+func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool {
+ var u, v, v3, vxx, check FieldElement
+
+ FeFromBytes(&p.Y, s)
+ FeOne(&p.Z)
+ FeSquare(&u, &p.Y)
+ FeMul(&v, &u, &d)
+	FeSub(&u, &u, &p.Z) // u = y^2-1
+ FeAdd(&v, &v, &p.Z) // v = dy^2+1
+
+ FeSquare(&v3, &v)
+ FeMul(&v3, &v3, &v) // v3 = v^3
+ FeSquare(&p.X, &v3)
+ FeMul(&p.X, &p.X, &v)
+ FeMul(&p.X, &p.X, &u) // x = uv^7
+
+ fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8)
+ FeMul(&p.X, &p.X, &v3)
+ FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8)
+
+ var tmpX, tmp2 [32]byte
+
+ FeSquare(&vxx, &p.X)
+ FeMul(&vxx, &vxx, &v)
+ FeSub(&check, &vxx, &u) // vx^2-u
+ if FeIsNonZero(&check) == 1 {
+ FeAdd(&check, &vxx, &u) // vx^2+u
+ if FeIsNonZero(&check) == 1 {
+ return false
+ }
+ FeMul(&p.X, &p.X, &SqrtM1)
+
+ FeToBytes(&tmpX, &p.X)
+ for i, v := range tmpX {
+ tmp2[31-i] = v
+ }
+ }
+
+ if FeIsNegative(&p.X) != (s[31] >> 7) {
+ FeNeg(&p.X, &p.X)
+ }
+
+ FeMul(&p.T, &p.X, &p.Y)
+ return true
+}
+
+func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) {
+ FeMul(&r.X, &p.X, &p.T)
+ FeMul(&r.Y, &p.Y, &p.Z)
+ FeMul(&r.Z, &p.Z, &p.T)
+}
+
+func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) {
+ FeMul(&r.X, &p.X, &p.T)
+ FeMul(&r.Y, &p.Y, &p.Z)
+ FeMul(&r.Z, &p.Z, &p.T)
+ FeMul(&r.T, &p.X, &p.Y)
+}
+
+func (p *PreComputedGroupElement) Zero() {
+ FeOne(&p.yPlusX)
+ FeOne(&p.yMinusX)
+ FeZero(&p.xy2d)
+}
+
+func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
+ var t0 FieldElement
+
+ FeAdd(&r.X, &p.Y, &p.X)
+ FeSub(&r.Y, &p.Y, &p.X)
+ FeMul(&r.Z, &r.X, &q.yPlusX)
+ FeMul(&r.Y, &r.Y, &q.yMinusX)
+ FeMul(&r.T, &q.T2d, &p.T)
+ FeMul(&r.X, &p.Z, &q.Z)
+ FeAdd(&t0, &r.X, &r.X)
+ FeSub(&r.X, &r.Z, &r.Y)
+ FeAdd(&r.Y, &r.Z, &r.Y)
+ FeAdd(&r.Z, &t0, &r.T)
+ FeSub(&r.T, &t0, &r.T)
+}
+
+func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
+ var t0 FieldElement
+
+ FeAdd(&r.X, &p.Y, &p.X)
+ FeSub(&r.Y, &p.Y, &p.X)
+ FeMul(&r.Z, &r.X, &q.yMinusX)
+ FeMul(&r.Y, &r.Y, &q.yPlusX)
+ FeMul(&r.T, &q.T2d, &p.T)
+ FeMul(&r.X, &p.Z, &q.Z)
+ FeAdd(&t0, &r.X, &r.X)
+ FeSub(&r.X, &r.Z, &r.Y)
+ FeAdd(&r.Y, &r.Z, &r.Y)
+ FeSub(&r.Z, &t0, &r.T)
+ FeAdd(&r.T, &t0, &r.T)
+}
+
+func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+ var t0 FieldElement
+
+ FeAdd(&r.X, &p.Y, &p.X)
+ FeSub(&r.Y, &p.Y, &p.X)
+ FeMul(&r.Z, &r.X, &q.yPlusX)
+ FeMul(&r.Y, &r.Y, &q.yMinusX)
+ FeMul(&r.T, &q.xy2d, &p.T)
+ FeAdd(&t0, &p.Z, &p.Z)
+ FeSub(&r.X, &r.Z, &r.Y)
+ FeAdd(&r.Y, &r.Z, &r.Y)
+ FeAdd(&r.Z, &t0, &r.T)
+ FeSub(&r.T, &t0, &r.T)
+}
+
+func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+ var t0 FieldElement
+
+ FeAdd(&r.X, &p.Y, &p.X)
+ FeSub(&r.Y, &p.Y, &p.X)
+ FeMul(&r.Z, &r.X, &q.yMinusX)
+ FeMul(&r.Y, &r.Y, &q.yPlusX)
+ FeMul(&r.T, &q.xy2d, &p.T)
+ FeAdd(&t0, &p.Z, &p.Z)
+ FeSub(&r.X, &r.Z, &r.Y)
+ FeAdd(&r.Y, &r.Z, &r.Y)
+ FeSub(&r.Z, &t0, &r.T)
+ FeAdd(&r.T, &t0, &r.T)
+}
+
+func slide(r *[256]int8, a *[32]byte) {
+ for i := range r {
+ r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
+ }
+
+ for i := range r {
+ if r[i] != 0 {
+ for b := 1; b <= 6 && i+b < 256; b++ {
+ if r[i+b] != 0 {
+ if r[i]+(r[i+b]<<uint(b)) <= 15 {
+ r[i] += r[i+b] << uint(b)
+ r[i+b] = 0
+ } else if r[i]-(r[i+b]<<uint(b)) >= -15 {
+ r[i] -= r[i+b] << uint(b)
+ for k := i + b; k < 256; k++ {
+ if r[k] == 0 {
+ r[k] = 1
+ break
+ }
+ r[k] = 0
+ }
+ } else {
+ break
+ }
+ }
+ }
+ }
+ }
+}
+
+// GeDoubleScalarMultVartime sets r = a*A + b*B
+// where a = a[0]+256*a[1]+...+256^31 a[31].
+// and b = b[0]+256*b[1]+...+256^31 b[31].
+// B is the Ed25519 base point (x,4/5) with x positive.
+func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
+ var aSlide, bSlide [256]int8
+ var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
+ var t CompletedGroupElement
+ var u, A2 ExtendedGroupElement
+ var i int
+
+ slide(&aSlide, a)
+ slide(&bSlide, b)
+
+ A.ToCached(&Ai[0])
+ A.Double(&t)
+ t.ToExtended(&A2)
+
+ for i := 0; i < 7; i++ {
+ geAdd(&t, &A2, &Ai[i])
+ t.ToExtended(&u)
+ u.ToCached(&Ai[i+1])
+ }
+
+ r.Zero()
+
+ for i = 255; i >= 0; i-- {
+ if aSlide[i] != 0 || bSlide[i] != 0 {
+ break
+ }
+ }
+
+ for ; i >= 0; i-- {
+ r.Double(&t)
+
+ if aSlide[i] > 0 {
+ t.ToExtended(&u)
+ geAdd(&t, &u, &Ai[aSlide[i]/2])
+ } else if aSlide[i] < 0 {
+ t.ToExtended(&u)
+ geSub(&t, &u, &Ai[(-aSlide[i])/2])
+ }
+
+ if bSlide[i] > 0 {
+ t.ToExtended(&u)
+ geMixedAdd(&t, &u, &bi[bSlide[i]/2])
+ } else if bSlide[i] < 0 {
+ t.ToExtended(&u)
+ geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
+ }
+
+ t.ToProjective(r)
+ }
+}
+
+// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
+// non-negative.
+func equal(b, c int32) int32 {
+ x := uint32(b ^ c)
+ x--
+ return int32(x >> 31)
+}
+
+// negative returns 1 if b < 0 and 0 otherwise.
+func negative(b int32) int32 {
+ return (b >> 31) & 1
+}
+
+func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
+ FeCMove(&t.yPlusX, &u.yPlusX, b)
+ FeCMove(&t.yMinusX, &u.yMinusX, b)
+ FeCMove(&t.xy2d, &u.xy2d, b)
+}
+
+func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
+ var minusT PreComputedGroupElement
+ bNegative := negative(b)
+ bAbs := b - (((-bNegative) & b) << 1)
+
+ t.Zero()
+ for i := int32(0); i < 8; i++ {
+ PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
+ }
+ FeCopy(&minusT.yPlusX, &t.yMinusX)
+ FeCopy(&minusT.yMinusX, &t.yPlusX)
+ FeNeg(&minusT.xy2d, &t.xy2d)
+ PreComputedGroupElementCMove(t, &minusT, bNegative)
+}
+
+// GeScalarMultBase computes h = a*B, where
+// a = a[0]+256*a[1]+...+256^31 a[31]
+// B is the Ed25519 base point (x,4/5) with x positive.
+//
+// Preconditions:
+// a[31] <= 127
+func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
+ var e [64]int8
+
+ for i, v := range a {
+ e[2*i] = int8(v & 15)
+ e[2*i+1] = int8((v >> 4) & 15)
+ }
+
+ // each e[i] is between 0 and 15 and e[63] is between 0 and 7.
+
+ carry := int8(0)
+ for i := 0; i < 63; i++ {
+ e[i] += carry
+ carry = (e[i] + 8) >> 4
+ e[i] -= carry << 4
+ }
+ e[63] += carry
+ // each e[i] is between -8 and 8.
+
+ h.Zero()
+ var t PreComputedGroupElement
+ var r CompletedGroupElement
+ for i := int32(1); i < 64; i += 2 {
+ selectPoint(&t, i/2, int32(e[i]))
+ geMixedAdd(&r, h, &t)
+ r.ToExtended(h)
+ }
+
+ var s ProjectiveGroupElement
+
+ h.Double(&r)
+ r.ToProjective(&s)
+ s.Double(&r)
+ r.ToProjective(&s)
+ s.Double(&r)
+ r.ToProjective(&s)
+ s.Double(&r)
+ r.ToExtended(h)
+
+ for i := int32(0); i < 64; i += 2 {
+ selectPoint(&t, i/2, int32(e[i]))
+ geMixedAdd(&r, h, &t)
+ r.ToExtended(h)
+ }
+}
+
+// The scalars are GF(2^252 + 27742317777372353535851937790883648493).
+
+// Input:
+// a[0]+256*a[1]+...+256^31*a[31] = a
+// b[0]+256*b[1]+...+256^31*b[31] = b
+// c[0]+256*c[1]+...+256^31*c[31] = c
+//
+// Output:
+// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
+// where l = 2^252 + 27742317777372353535851937790883648493.
+func ScMulAdd(s, a, b, c *[32]byte) {
+ a0 := 2097151 & load3(a[:])
+ a1 := 2097151 & (load4(a[2:]) >> 5)
+ a2 := 2097151 & (load3(a[5:]) >> 2)
+ a3 := 2097151 & (load4(a[7:]) >> 7)
+ a4 := 2097151 & (load4(a[10:]) >> 4)
+ a5 := 2097151 & (load3(a[13:]) >> 1)
+ a6 := 2097151 & (load4(a[15:]) >> 6)
+ a7 := 2097151 & (load3(a[18:]) >> 3)
+ a8 := 2097151 & load3(a[21:])
+ a9 := 2097151 & (load4(a[23:]) >> 5)
+ a10 := 2097151 & (load3(a[26:]) >> 2)
+ a11 := (load4(a[28:]) >> 7)
+ b0 := 2097151 & load3(b[:])
+ b1 := 2097151 & (load4(b[2:]) >> 5)
+ b2 := 2097151 & (load3(b[5:]) >> 2)
+ b3 := 2097151 & (load4(b[7:]) >> 7)
+ b4 := 2097151 & (load4(b[10:]) >> 4)
+ b5 := 2097151 & (load3(b[13:]) >> 1)
+ b6 := 2097151 & (load4(b[15:]) >> 6)
+ b7 := 2097151 & (load3(b[18:]) >> 3)
+ b8 := 2097151 & load3(b[21:])
+ b9 := 2097151 & (load4(b[23:]) >> 5)
+ b10 := 2097151 & (load3(b[26:]) >> 2)
+ b11 := (load4(b[28:]) >> 7)
+ c0 := 2097151 & load3(c[:])
+ c1 := 2097151 & (load4(c[2:]) >> 5)
+ c2 := 2097151 & (load3(c[5:]) >> 2)
+ c3 := 2097151 & (load4(c[7:]) >> 7)
+ c4 := 2097151 & (load4(c[10:]) >> 4)
+ c5 := 2097151 & (load3(c[13:]) >> 1)
+ c6 := 2097151 & (load4(c[15:]) >> 6)
+ c7 := 2097151 & (load3(c[18:]) >> 3)
+ c8 := 2097151 & load3(c[21:])
+ c9 := 2097151 & (load4(c[23:]) >> 5)
+ c10 := 2097151 & (load3(c[26:]) >> 2)
+ c11 := (load4(c[28:]) >> 7)
+ var carry [23]int64
+
+ s0 := c0 + a0*b0
+ s1 := c1 + a0*b1 + a1*b0
+ s2 := c2 + a0*b2 + a1*b1 + a2*b0
+ s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
+ s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
+ s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
+ s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
+ s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
+ s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
+ s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
+ s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
+ s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
+ s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
+ s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
+ s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
+ s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
+ s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
+ s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
+ s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
+ s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
+ s20 := a9*b11 + a10*b10 + a11*b9
+ s21 := a10*b11 + a11*b10
+ s22 := a11 * b11
+ s23 := int64(0)
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+ carry[18] = (s18 + (1 << 20)) >> 21
+ s19 += carry[18]
+ s18 -= carry[18] << 21
+ carry[20] = (s20 + (1 << 20)) >> 21
+ s21 += carry[20]
+ s20 -= carry[20] << 21
+ carry[22] = (s22 + (1 << 20)) >> 21
+ s23 += carry[22]
+ s22 -= carry[22] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+ carry[17] = (s17 + (1 << 20)) >> 21
+ s18 += carry[17]
+ s17 -= carry[17] << 21
+ carry[19] = (s19 + (1 << 20)) >> 21
+ s20 += carry[19]
+ s19 -= carry[19] << 21
+ carry[21] = (s21 + (1 << 20)) >> 21
+ s22 += carry[21]
+ s21 -= carry[21] << 21
+
+ s11 += s23 * 666643
+ s12 += s23 * 470296
+ s13 += s23 * 654183
+ s14 -= s23 * 997805
+ s15 += s23 * 136657
+ s16 -= s23 * 683901
+ s23 = 0
+
+ s10 += s22 * 666643
+ s11 += s22 * 470296
+ s12 += s22 * 654183
+ s13 -= s22 * 997805
+ s14 += s22 * 136657
+ s15 -= s22 * 683901
+ s22 = 0
+
+ s9 += s21 * 666643
+ s10 += s21 * 470296
+ s11 += s21 * 654183
+ s12 -= s21 * 997805
+ s13 += s21 * 136657
+ s14 -= s21 * 683901
+ s21 = 0
+
+ s8 += s20 * 666643
+ s9 += s20 * 470296
+ s10 += s20 * 654183
+ s11 -= s20 * 997805
+ s12 += s20 * 136657
+ s13 -= s20 * 683901
+ s20 = 0
+
+ s7 += s19 * 666643
+ s8 += s19 * 470296
+ s9 += s19 * 654183
+ s10 -= s19 * 997805
+ s11 += s19 * 136657
+ s12 -= s19 * 683901
+ s19 = 0
+
+ s6 += s18 * 666643
+ s7 += s18 * 470296
+ s8 += s18 * 654183
+ s9 -= s18 * 997805
+ s10 += s18 * 136657
+ s11 -= s18 * 683901
+ s18 = 0
+
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+
+ s5 += s17 * 666643
+ s6 += s17 * 470296
+ s7 += s17 * 654183
+ s8 -= s17 * 997805
+ s9 += s17 * 136657
+ s10 -= s17 * 683901
+ s17 = 0
+
+ s4 += s16 * 666643
+ s5 += s16 * 470296
+ s6 += s16 * 654183
+ s7 -= s16 * 997805
+ s8 += s16 * 136657
+ s9 -= s16 * 683901
+ s16 = 0
+
+ s3 += s15 * 666643
+ s4 += s15 * 470296
+ s5 += s15 * 654183
+ s6 -= s15 * 997805
+ s7 += s15 * 136657
+ s8 -= s15 * 683901
+ s15 = 0
+
+ s2 += s14 * 666643
+ s3 += s14 * 470296
+ s4 += s14 * 654183
+ s5 -= s14 * 997805
+ s6 += s14 * 136657
+ s7 -= s14 * 683901
+ s14 = 0
+
+ s1 += s13 * 666643
+ s2 += s13 * 470296
+ s3 += s13 * 654183
+ s4 -= s13 * 997805
+ s5 += s13 * 136657
+ s6 -= s13 * 683901
+ s13 = 0
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[11] = s11 >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ s[0] = byte(s0 >> 0)
+ s[1] = byte(s0 >> 8)
+ s[2] = byte((s0 >> 16) | (s1 << 5))
+ s[3] = byte(s1 >> 3)
+ s[4] = byte(s1 >> 11)
+ s[5] = byte((s1 >> 19) | (s2 << 2))
+ s[6] = byte(s2 >> 6)
+ s[7] = byte((s2 >> 14) | (s3 << 7))
+ s[8] = byte(s3 >> 1)
+ s[9] = byte(s3 >> 9)
+ s[10] = byte((s3 >> 17) | (s4 << 4))
+ s[11] = byte(s4 >> 4)
+ s[12] = byte(s4 >> 12)
+ s[13] = byte((s4 >> 20) | (s5 << 1))
+ s[14] = byte(s5 >> 7)
+ s[15] = byte((s5 >> 15) | (s6 << 6))
+ s[16] = byte(s6 >> 2)
+ s[17] = byte(s6 >> 10)
+ s[18] = byte((s6 >> 18) | (s7 << 3))
+ s[19] = byte(s7 >> 5)
+ s[20] = byte(s7 >> 13)
+ s[21] = byte(s8 >> 0)
+ s[22] = byte(s8 >> 8)
+ s[23] = byte((s8 >> 16) | (s9 << 5))
+ s[24] = byte(s9 >> 3)
+ s[25] = byte(s9 >> 11)
+ s[26] = byte((s9 >> 19) | (s10 << 2))
+ s[27] = byte(s10 >> 6)
+ s[28] = byte((s10 >> 14) | (s11 << 7))
+ s[29] = byte(s11 >> 1)
+ s[30] = byte(s11 >> 9)
+ s[31] = byte(s11 >> 17)
+}
+
+// Input:
+// s[0]+256*s[1]+...+256^63*s[63] = s
+//
+// Output:
+// s[0]+256*s[1]+...+256^31*s[31] = s mod l
+// where l = 2^252 + 27742317777372353535851937790883648493.
+func ScReduce(out *[32]byte, s *[64]byte) {
+ s0 := 2097151 & load3(s[:])
+ s1 := 2097151 & (load4(s[2:]) >> 5)
+ s2 := 2097151 & (load3(s[5:]) >> 2)
+ s3 := 2097151 & (load4(s[7:]) >> 7)
+ s4 := 2097151 & (load4(s[10:]) >> 4)
+ s5 := 2097151 & (load3(s[13:]) >> 1)
+ s6 := 2097151 & (load4(s[15:]) >> 6)
+ s7 := 2097151 & (load3(s[18:]) >> 3)
+ s8 := 2097151 & load3(s[21:])
+ s9 := 2097151 & (load4(s[23:]) >> 5)
+ s10 := 2097151 & (load3(s[26:]) >> 2)
+ s11 := 2097151 & (load4(s[28:]) >> 7)
+ s12 := 2097151 & (load4(s[31:]) >> 4)
+ s13 := 2097151 & (load3(s[34:]) >> 1)
+ s14 := 2097151 & (load4(s[36:]) >> 6)
+ s15 := 2097151 & (load3(s[39:]) >> 3)
+ s16 := 2097151 & load3(s[42:])
+ s17 := 2097151 & (load4(s[44:]) >> 5)
+ s18 := 2097151 & (load3(s[47:]) >> 2)
+ s19 := 2097151 & (load4(s[49:]) >> 7)
+ s20 := 2097151 & (load4(s[52:]) >> 4)
+ s21 := 2097151 & (load3(s[55:]) >> 1)
+ s22 := 2097151 & (load4(s[57:]) >> 6)
+ s23 := (load4(s[60:]) >> 3)
+
+ s11 += s23 * 666643
+ s12 += s23 * 470296
+ s13 += s23 * 654183
+ s14 -= s23 * 997805
+ s15 += s23 * 136657
+ s16 -= s23 * 683901
+ s23 = 0
+
+ s10 += s22 * 666643
+ s11 += s22 * 470296
+ s12 += s22 * 654183
+ s13 -= s22 * 997805
+ s14 += s22 * 136657
+ s15 -= s22 * 683901
+ s22 = 0
+
+ s9 += s21 * 666643
+ s10 += s21 * 470296
+ s11 += s21 * 654183
+ s12 -= s21 * 997805
+ s13 += s21 * 136657
+ s14 -= s21 * 683901
+ s21 = 0
+
+ s8 += s20 * 666643
+ s9 += s20 * 470296
+ s10 += s20 * 654183
+ s11 -= s20 * 997805
+ s12 += s20 * 136657
+ s13 -= s20 * 683901
+ s20 = 0
+
+ s7 += s19 * 666643
+ s8 += s19 * 470296
+ s9 += s19 * 654183
+ s10 -= s19 * 997805
+ s11 += s19 * 136657
+ s12 -= s19 * 683901
+ s19 = 0
+
+ s6 += s18 * 666643
+ s7 += s18 * 470296
+ s8 += s18 * 654183
+ s9 -= s18 * 997805
+ s10 += s18 * 136657
+ s11 -= s18 * 683901
+ s18 = 0
+
+ var carry [17]int64
+
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[12] = (s12 + (1 << 20)) >> 21
+ s13 += carry[12]
+ s12 -= carry[12] << 21
+ carry[14] = (s14 + (1 << 20)) >> 21
+ s15 += carry[14]
+ s14 -= carry[14] << 21
+ carry[16] = (s16 + (1 << 20)) >> 21
+ s17 += carry[16]
+ s16 -= carry[16] << 21
+
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+ carry[13] = (s13 + (1 << 20)) >> 21
+ s14 += carry[13]
+ s13 -= carry[13] << 21
+ carry[15] = (s15 + (1 << 20)) >> 21
+ s16 += carry[15]
+ s15 -= carry[15] << 21
+
+ s5 += s17 * 666643
+ s6 += s17 * 470296
+ s7 += s17 * 654183
+ s8 -= s17 * 997805
+ s9 += s17 * 136657
+ s10 -= s17 * 683901
+ s17 = 0
+
+ s4 += s16 * 666643
+ s5 += s16 * 470296
+ s6 += s16 * 654183
+ s7 -= s16 * 997805
+ s8 += s16 * 136657
+ s9 -= s16 * 683901
+ s16 = 0
+
+ s3 += s15 * 666643
+ s4 += s15 * 470296
+ s5 += s15 * 654183
+ s6 -= s15 * 997805
+ s7 += s15 * 136657
+ s8 -= s15 * 683901
+ s15 = 0
+
+ s2 += s14 * 666643
+ s3 += s14 * 470296
+ s4 += s14 * 654183
+ s5 -= s14 * 997805
+ s6 += s14 * 136657
+ s7 -= s14 * 683901
+ s14 = 0
+
+ s1 += s13 * 666643
+ s2 += s13 * 470296
+ s3 += s13 * 654183
+ s4 -= s13 * 997805
+ s5 += s13 * 136657
+ s6 -= s13 * 683901
+ s13 = 0
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = (s0 + (1 << 20)) >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[2] = (s2 + (1 << 20)) >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[4] = (s4 + (1 << 20)) >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[6] = (s6 + (1 << 20)) >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[8] = (s8 + (1 << 20)) >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[10] = (s10 + (1 << 20)) >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ carry[1] = (s1 + (1 << 20)) >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[3] = (s3 + (1 << 20)) >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[5] = (s5 + (1 << 20)) >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[7] = (s7 + (1 << 20)) >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[9] = (s9 + (1 << 20)) >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[11] = (s11 + (1 << 20)) >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+ carry[11] = s11 >> 21
+ s12 += carry[11]
+ s11 -= carry[11] << 21
+
+ s0 += s12 * 666643
+ s1 += s12 * 470296
+ s2 += s12 * 654183
+ s3 -= s12 * 997805
+ s4 += s12 * 136657
+ s5 -= s12 * 683901
+ s12 = 0
+
+ carry[0] = s0 >> 21
+ s1 += carry[0]
+ s0 -= carry[0] << 21
+ carry[1] = s1 >> 21
+ s2 += carry[1]
+ s1 -= carry[1] << 21
+ carry[2] = s2 >> 21
+ s3 += carry[2]
+ s2 -= carry[2] << 21
+ carry[3] = s3 >> 21
+ s4 += carry[3]
+ s3 -= carry[3] << 21
+ carry[4] = s4 >> 21
+ s5 += carry[4]
+ s4 -= carry[4] << 21
+ carry[5] = s5 >> 21
+ s6 += carry[5]
+ s5 -= carry[5] << 21
+ carry[6] = s6 >> 21
+ s7 += carry[6]
+ s6 -= carry[6] << 21
+ carry[7] = s7 >> 21
+ s8 += carry[7]
+ s7 -= carry[7] << 21
+ carry[8] = s8 >> 21
+ s9 += carry[8]
+ s8 -= carry[8] << 21
+ carry[9] = s9 >> 21
+ s10 += carry[9]
+ s9 -= carry[9] << 21
+ carry[10] = s10 >> 21
+ s11 += carry[10]
+ s10 -= carry[10] << 21
+
+ out[0] = byte(s0 >> 0)
+ out[1] = byte(s0 >> 8)
+ out[2] = byte((s0 >> 16) | (s1 << 5))
+ out[3] = byte(s1 >> 3)
+ out[4] = byte(s1 >> 11)
+ out[5] = byte((s1 >> 19) | (s2 << 2))
+ out[6] = byte(s2 >> 6)
+ out[7] = byte((s2 >> 14) | (s3 << 7))
+ out[8] = byte(s3 >> 1)
+ out[9] = byte(s3 >> 9)
+ out[10] = byte((s3 >> 17) | (s4 << 4))
+ out[11] = byte(s4 >> 4)
+ out[12] = byte(s4 >> 12)
+ out[13] = byte((s4 >> 20) | (s5 << 1))
+ out[14] = byte(s5 >> 7)
+ out[15] = byte((s5 >> 15) | (s6 << 6))
+ out[16] = byte(s6 >> 2)
+ out[17] = byte(s6 >> 10)
+ out[18] = byte((s6 >> 18) | (s7 << 3))
+ out[19] = byte(s7 >> 5)
+ out[20] = byte(s7 >> 13)
+ out[21] = byte(s8 >> 0)
+ out[22] = byte(s8 >> 8)
+ out[23] = byte((s8 >> 16) | (s9 << 5))
+ out[24] = byte(s9 >> 3)
+ out[25] = byte(s9 >> 11)
+ out[26] = byte((s9 >> 19) | (s10 << 2))
+ out[27] = byte(s10 >> 6)
+ out[28] = byte((s10 >> 14) | (s11 << 7))
+ out[29] = byte(s11 >> 1)
+ out[30] = byte(s11 >> 9)
+ out[31] = byte(s11 >> 17)
+}
+
+// order is the order of Curve25519 in little-endian form.
+var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
+
+// ScMinimal returns true if the given scalar is less than the order of the
+// curve.
+func ScMinimal(scalar *[32]byte) bool {
+ for i := 3; ; i-- {
+ v := binary.LittleEndian.Uint64(scalar[i*8:])
+ if v > order[i] {
+ return false
+ } else if v < order[i] {
+ break
+ } else if i == 0 {
+ return false
+ }
+ }
+
+ return true
+}
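
ScMinimal above walks the four little-endian uint64 limbs of the group order l = 2^252 + 27742317777372353535851937790883648493 from the most significant limb down. For comparison, the same "scalar < l" test can be written against math/big; the sketch below is illustrative only (the helper name scMinimalBig and the test value are made up) and is not part of the vendored package.

package main

import (
	"fmt"
	"math/big"
)

// scMinimalBig reports whether a 32-byte little-endian scalar is
// strictly less than the Ed25519 group order l, mirroring ScMinimal.
func scMinimalBig(scalar *[32]byte) bool {
	// l = 2^252 + 27742317777372353535851937790883648493
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	c, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, c)

	// big.Int expects big-endian bytes, so reverse the scalar first.
	var be [32]byte
	for i := range scalar {
		be[31-i] = scalar[i]
	}
	return new(big.Int).SetBytes(be[:]).Cmp(l) < 0
}

func main() {
	var s [32]byte
	s[0] = 1 // the scalar 1, well below l
	fmt.Println(scMinimalBig(&s)) // true
}
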
diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
index 73f4fe378..72a6a7394 100644
--- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
+++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
@@ -76,7 +76,9 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err
// Bleichenbacher, Advances in Cryptology (Crypto '98),
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
- s.ModInverse(s, priv.P)
+ if s.ModInverse(s, priv.P) == nil {
+ return nil, errors.New("elgamal: invalid private key")
+ }
s.Mul(s, c2)
s.Mod(s, priv.P)
em := s.Bytes()
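
The Decrypt change above handles the case where (*big.Int).ModInverse returns nil, which math/big uses to signal that no modular inverse exists (gcd(g, n) != 1); previously the bad value flowed silently into the rest of the computation. A stand-alone sketch of that behaviour, with made-up small numbers:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	n := big.NewInt(10)

	// gcd(4, 10) = 2, so 4 has no inverse mod 10: ModInverse returns nil,
	// the condition the patched Decrypt now reports as an error instead
	// of continuing with a bogus value.
	if new(big.Int).ModInverse(big.NewInt(4), n) == nil {
		fmt.Println("no modular inverse exists")
	}

	// gcd(3, 10) = 1, so the inverse exists: 3*7 = 21 ≡ 1 (mod 10).
	fmt.Println(new(big.Int).ModInverse(big.NewInt(3), n)) // 7
}
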
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
index 02b372cf3..6d7639722 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -5,6 +5,7 @@
package packet
import (
+ "crypto"
"crypto/rsa"
"encoding/binary"
"io"
@@ -78,8 +79,9 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- k := priv.PrivateKey.(*rsa.PrivateKey)
- b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
+ // Supports both *rsa.PrivateKey and crypto.Decrypter
+ k := priv.PrivateKey.(crypto.Decrypter)
+ b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil)
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
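
The EncryptedKey.Decrypt change above decrypts through the crypto.Decrypter interface instead of asserting the concrete *rsa.PrivateKey, so implementations other than in-memory RSA keys can satisfy it. The sketch below only illustrates that *rsa.PrivateKey already fits the interface; the helper decryptVia, the key size and the message are placeholders, not part of the patch:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// decryptVia decrypts through the interface, much as the patched
// EncryptedKey.Decrypt does; nil opts selects PKCS#1 v1.5 for RSA keys.
func decryptVia(d crypto.Decrypter, ciphertext []byte) ([]byte, error) {
	return d.Decrypt(rand.Reader, ciphertext, nil)
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	ct, err := rsa.EncryptPKCS1v15(rand.Reader, &key.PublicKey, []byte("session key bytes"))
	if err != nil {
		panic(err)
	}
	pt, err := decryptVia(key, ct) // *rsa.PrivateKey satisfies crypto.Decrypter
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}
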
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index 6f8ec0938..81abb7cef 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -31,7 +31,7 @@ type PrivateKey struct {
encryptedData []byte
cipher CipherFunction
s2k func(out, in []byte)
- PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
+ PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decrypter is RSA only).
sha1Checksum bool
iv []byte
}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 000000000..593f65300
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := prf.Size()
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := 1; block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint(i))
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[len(dk)-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := 2; n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
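
pbkdf2.Key is the package's single entry point; its doc comment above already shows an HMAC-SHA-1 call. A short usage sketch with HMAC-SHA-256 instead (the salt length, iteration count and password are placeholders to be tuned by the caller):

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// A random salt of at least 8 bytes, as the doc comment recommends.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Derive a 32-byte key (e.g. for AES-256) with HMAC-SHA-256.
	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", dk)
}
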
diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc
new file mode 100644
index 000000000..730e569b0
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc
@@ -0,0 +1 @@
+'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file
diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore
new file mode 100644
index 000000000..5b4d73b68
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.gitignore
@@ -0,0 +1,7 @@
+*~
+.*.swp
+*.out
+*.test
+*.pem
+*.cov
+jose-util/jose-util
diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
new file mode 100644
index 000000000..fc501ca9b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
@@ -0,0 +1,46 @@
+language: go
+
+sudo: false
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+- '1.7.x'
+- '1.8.x'
+- '1.9.x'
+- '1.10.x'
+- '1.11.x'
+
+go_import_path: gopkg.in/square/go-jose.v2
+
+before_script:
+- export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+# Install encrypted gitcookies to get around bandwidth-limits
+# that is causing Travis-CI builds to fail. For more info, see
+# https://github.com/golang/go/issues/12933
+- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
+- bash .gitcookies.sh || true
+- go get github.com/wadey/gocovmerge
+- go get github.com/mattn/goveralls
+- go get github.com/stretchr/testify/assert
+- go get golang.org/x/tools/cmd/cover || true
+- go get code.google.com/p/go.tools/cmd/cover || true
+- pip install cram --user
+
+script:
+- go test . -v -covermode=count -coverprofile=profile.cov
+- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
+- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov
+- go test ./json -v # no coverage for forked encoding/json package
+- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t
+- cd ..
+
+after_success:
+- gocovmerge *.cov */*.cov > merged.coverprofile
+- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci
+
diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md
new file mode 100644
index 000000000..3305db0f6
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md
@@ -0,0 +1,10 @@
+Serious about security
+======================
+
+Square recognizes the important contributions the security research community
+can make. We therefore encourage reporting security issues with the code
+contained in this repository.
+
+If you believe you have discovered a security vulnerability, please follow the
+guidelines at <https://bugcrowd.com/squareopensource>.
+
diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md
new file mode 100644
index 000000000..61b183651
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+[Individual Contributor License Agreement][1].
+
+ [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md
new file mode 100644
index 000000000..1791bfa8f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/README.md
@@ -0,0 +1,118 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
+[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE)
+[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose)
+[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and full serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+We use [gopkg.in](https://gopkg.in) for versioning.
+
+[Version 2](https://gopkg.in/square/go-jose.v2)
+([branch](https://github.com/square/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version:
+
+ import "gopkg.in/square/go-jose.v2"
+
+The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will
+still receive backported bug fixes and security fixes, but otherwise
+development is frozen. All new feature development takes place on the `v2`
+branch. Version 2 also contains additional sub-packages such as the
+[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation
+contributed by [@shaxbee](https://github.com/shaxbee).
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES<sup>1</sup>
+ Direct encryption | dir<sup>1</sup>
+
+<sup>1. Not supported in multi-recipient mode</sup>
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA<sup>2</sup>
+
+<sup>2. Only available in version 2 of the package</sup>
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+<sup>1. Only available in version 2 of the package</sup>
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example.
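
For orientation, here is a minimal, self-contained sketch of the v2 API this bump vendors (not taken from the upstream sources): it encrypts to an RSA public key with RSA-OAEP key management and A128GCM content encryption, then decrypts with the private key. The compact/full serialization round trip is omitted; the constant and type names used are the package's exported identifiers (see crypter.go and asymmetric.go below).

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        jose "gopkg.in/square/go-jose.v2"
    )

    func main() {
        // Throwaway RSA key pair for the example.
        priv, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }

        // RSA-OAEP for key management, A128GCM for content encryption
        // (both appear in the algorithm tables above).
        enc, err := jose.NewEncrypter(
            jose.A128GCM,
            jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
            nil,
        )
        if err != nil {
            panic(err)
        }

        obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
        if err != nil {
            panic(err)
        }

        // Decrypt with the matching private key.
        plaintext, err := obj.Decrypt(priv)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", plaintext)
    }
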
diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go
new file mode 100644
index 000000000..67935561b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "golang.org/x/crypto/ed25519"
+ "gopkg.in/square/go-jose.v2/cipher"
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("square/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("square/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement; no key unwrapping is necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("square/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("square/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
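
The ECDSA signPayload above serializes r and s as fixed-width, zero-padded big-endian values (the raw JWS format rather than ASN.1/DER). A small standalone sketch of that encoding, using only the standard library:

    package main

    import (
        "fmt"
        "math/big"
    )

    // encodeFixedWidth left-pads r and s to keyBytes each and concatenates them,
    // mirroring the padding logic in ecDecrypterSigner.signPayload.
    func encodeFixedWidth(r, s *big.Int, keyBytes int) []byte {
        out := make([]byte, 2*keyBytes)
        rBytes := r.Bytes()
        sBytes := s.Bytes()
        copy(out[keyBytes-len(rBytes):keyBytes], rBytes)
        copy(out[2*keyBytes-len(sBytes):], sBytes)
        return out
    }

    func main() {
        r := big.NewInt(0xabcd)
        s := big.NewInt(0x01)
        // For ES256 (P-256) keyBytes is 32, so the signature is always 64 bytes.
        fmt.Printf("%x\n", encodeFixedWidth(r, s, 32))
    }
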
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
new file mode 100644
index 000000000..126b85ce2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
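
A short usage sketch for the AEAD above, wired up the way A128CBC-HS256 would be: a 32-byte combined key whose first half is the HMAC-SHA-256 integrity key and whose second half is the AES-128 encryption key, with a 16-byte IV. Only identifiers from this file and the standard library are used.

    package main

    import (
        "crypto/aes"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        // 32-byte combined key: 16 bytes integrity (HMAC-SHA-256), 16 bytes encryption (AES-128).
        key := make([]byte, 32)
        // The nonce size reported by this AEAD is 16 bytes (the AES block size).
        nonce := make([]byte, 16)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }

        aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
        if err != nil {
            panic(err)
        }

        aad := []byte("protected header")
        ciphertext := aead.Seal(nil, nonce, []byte("hello, JWE"), aad)

        plaintext, err := aead.Open(nil, nonce, ciphertext, aad)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", plaintext)
    }
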
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
new file mode 100644
index 000000000..f62c3bdba
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
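
For illustration, a sketch of driving the Concat KDF reader above directly to derive 32 key bytes from a shared secret. DeriveECDHES in the next file is the real caller; it additionally length-prefixes the algorithm and party-info fields before handing them to NewConcatKDF.

    package main

    import (
        "crypto"
        _ "crypto/sha256" // register SHA-256 so crypto.SHA256.New() is available
        "encoding/binary"
        "fmt"
        "io"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        z := []byte("shared secret from a key agreement")

        // supPubInfo carries the requested key length in bits, big-endian.
        supPubInfo := make([]byte, 4)
        binary.BigEndian.PutUint32(supPubInfo, 32*8)

        kdf := josecipher.NewConcatKDF(crypto.SHA256, z,
            []byte("A256GCM"), nil, nil, supPubInfo, nil)

        key := make([]byte, 32)
        if _, err := io.ReadFull(kdf, key); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", key)
    }
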
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
new file mode 100644
index 000000000..c128e327f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
@@ -0,0 +1,62 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+ return key
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
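
A sketch of the symmetry DeriveECDHES provides: both sides of an ephemeral-static ECDH exchange derive the same content key when each combines its own private key with the peer's public key (error handling elided for brevity).

    package main

    import (
        "bytes"
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

        // 16-byte output, matching direct ECDH-ES with A128GCM
        // (the algorithm ID is the "enc" value and size is the CEK size).
        k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
        k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

        fmt.Println(bytes.Equal(k1, k2)) // true
    }
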
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
new file mode 100644
index 000000000..1d36d5015
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("square/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
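
A round-trip sketch for the two functions above, wrapping a 256-bit content key under a 128-bit key-encryption key, as the A128KW algorithm from the README tables does:

    package main

    import (
        "bytes"
        "crypto/aes"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        kek := make([]byte, 16) // key-encryption key (AES-128, i.e. A128KW)
        cek := make([]byte, 32) // content key to wrap; must be a multiple of 8 bytes
        if _, err := rand.Read(kek); err != nil {
            panic(err)
        }
        if _, err := rand.Read(cek); err != nil {
            panic(err)
        }

        block, err := aes.NewCipher(kek)
        if err != nil {
            panic(err)
        }

        wrapped, err := josecipher.KeyWrap(block, cek) // len(wrapped) == len(cek) + 8
        if err != nil {
            panic(err)
        }

        unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
        if err != nil {
            panic(err)
        }
        fmt.Println(bytes.Equal(cek, unwrapped)) // true
    }
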
diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go
new file mode 100644
index 000000000..c45c71206
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go
@@ -0,0 +1,535 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+ // Optional map of additional keys to be inserted into the protected header
+ // of a JWS object. Some specifications which make use of JWS like to insert
+ // additional values here. All values must be JSON-serializable.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
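+
+// A usage sketch (illustrative only, not upstream code): encrypting to a
+// passphrase with explicit PBES2 parameters. PBES2Count and PBES2Salt may be
+// omitted, in which case the defaults documented above apply.
+//
+//     rcpt := Recipient{
+//         Algorithm:  PBES2_HS256_A128KW,
+//         Key:        []byte("correct horse battery staple"),
+//         PBES2Count: 150000,
+//     }
+//     enc, _ := NewEncrypter(A128GCM, rcpt, nil)
+//     obj, _ := enc.Encrypt([]byte("secret payload"))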
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: rawKey.([]byte),
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ typeOf := reflect.TypeOf(rawKey)
+ if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: rawKey.(*ecdsa.PublicKey),
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+ if len(rcpts) == 0 {
+ return nil, fmt.Errorf("square/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+ }
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
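A hedged sketch of how additional authenticated data might be supplied through this method; the encrypter is assumed to have been constructed as in the example above, and the helper name is made up.

package example

import (
	jose "gopkg.in/square/go-jose.v2"
)

// sealWithAAD binds caller-supplied authenticated data to the ciphertext.
// The AAD is integrity-protected but not encrypted, so it stays visible in
// the serialized object. enc, payload and aad are hypothetical inputs.
func sealWithAAD(enc jose.Encrypter, payload, aad []byte) (string, error) {
	obj, err := enc.EncryptWithAuthData(payload, aad)
	if err != nil {
		return "", err
	}
	// Emit the full JSON serialization, which carries the AAD field.
	return obj.FullSerialize(), nil
}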
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. Note that this
+// function does not support multi-recipient objects; if you need to decrypt a
+// multi-recipient object, use DecryptMulti instead.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ return plaintext, err
+}
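For orientation (an illustrative sketch, not part of the change), a single-recipient round trip through Encrypt, the compact serialization, and Decrypt could look like this; the shared key is generated on the spot and the DIRECT mode matches the special case handled in NewEncrypter above.

package main

import (
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// 256-bit shared key used directly as the content encryption key.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(jose.A256GCM, jose.Recipient{Algorithm: jose.DIRECT, Key: key}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte("secret payload"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // secret payload
}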
+
+// DecryptMulti decrypts and validates the object, with support for multiple
+// recipients. It returns the index of the recipient for which decryption
+// succeeded, the merged headers for that recipient, and the plaintext.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil || err != nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
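A brief sketch of the consumer side (hedged; raw is assumed to be a multi-recipient JWE in the full serialization and priv the RSA private key of one listed recipient):

package example

import (
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

// decryptAsRecipient tries every recipient entry with priv and reports which
// one matched. raw and priv are hypothetical inputs.
func decryptAsRecipient(raw string, priv *rsa.PrivateKey) ([]byte, error) {
	obj, err := jose.ParseEncrypted(raw)
	if err != nil {
		return nil, err
	}
	idx, hdr, plaintext, err := obj.DecryptMulti(priv)
	if err != nil {
		return nil, err
	}
	fmt.Printf("decrypted as recipient %d (kid=%q)\n", idx, hdr.KeyID)
	return plaintext, nil
}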
diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go
new file mode 100644
index 000000000..dd1387f3f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/doc.go
@@ -0,0 +1,27 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON
+Web Token support available in a sub-package. The library supports both the
+compact and full serialization formats, and has optional support for multiple
+recipients.
+
+*/
+package jose
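Since the package documentation above also covers signing, here is a minimal JWS sketch for comparison (hedged; it relies on go-jose v2 signing API that is not shown in this patch, and the key and payload are made up):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte(`{"sub":"example"}`))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&priv.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}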
diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go
new file mode 100644
index 000000000..b9687c647
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go
@@ -0,0 +1,179 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "regexp"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+var stripWhitespaceRegex = regexp.MustCompile("\\s")
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/square/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ return stripWhitespaceRegex.ReplaceAllString(data, "")
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Compress with DEFLATE
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// Decompress with DEFLATE
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ _, err := io.Copy(output, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md
new file mode 100644
index 000000000..86de5e558
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
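To make the duplicate-key behavior concrete, a small hedged illustration (the import alias is arbitrary; the error text matches the fork's decoder):

package main

import (
	"fmt"

	josejson "gopkg.in/square/go-jose.v2/json"
)

func main() {
	var m map[string]interface{}
	// The second "alg" member causes the whole input to be rejected.
	err := josejson.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &m)
	fmt.Println(err) // json: duplicate key 'alg' in object
}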
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
new file mode 100644
index 000000000..37457e5a8
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag).
+// Unlike the standard library, this fork matches member names case-sensitively.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
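A hedged illustration of the case-sensitive member matching this fork performs (the struct and inputs are hypothetical):

package main

import (
	"fmt"

	josejson "gopkg.in/square/go-jose.v2/json"
)

type claims struct {
	Issuer string `json:"iss"`
}

func main() {
	var exact, wrongCase claims
	_ = josejson.Unmarshal([]byte(`{"iss":"example.org"}`), &exact)
	_ = josejson.Unmarshal([]byte(`{"ISS":"example.org"}`), &wrongCase)
	// Only the exact-case member name is matched by this fork.
	fmt.Printf("%q %q\n", exact.Issuer, wrongCase.Issuer) // "example.org" ""
}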
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ useNumber bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value v.
+// It updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ // Empty string given.
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 if the escape sequence is invalid.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different from Go's, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go
new file mode 100644
index 000000000..1dae8bb7c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
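A short, hedged illustration of the field-tag behavior documented above (the type and values are made up):

package main

import (
	"fmt"

	josejson "gopkg.in/square/go-jose.v2/json"
)

type protected struct {
	Alg   string `json:"alg"`
	KeyID string `json:"kid,omitempty"`
	Note  string `json:"-"`
}

func main() {
	b, err := josejson.Marshal(protected{Alg: "RS256", Note: "never serialized"})
	if err != nil {
		panic(err)
	}
	// The empty kid is omitted and Note is skipped entirely.
	fmt.Println(string(b)) // {"alg":"RS256"}
}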
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
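For example (a hedged sketch; note that the input must already be JSON-encoded, as HTMLEscape expects):

package main

import (
	"bytes"
	"fmt"

	josejson "gopkg.in/square/go-jose.v2/json"
)

func main() {
	var buf bytes.Buffer
	// Angle brackets and ampersands are rewritten as \u escapes.
	josejson.HTMLEscape(&buf, []byte(`{"msg":"<b>hi</b> & bye"}`))
	fmt.Println(buf.String())
	// {"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
}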
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ valueEncoder(v)(e, v, false)
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // To deal with recursive types, populate the map with an
+ // indirect func before we build it. This type waits on the
+ // real func (f) to be ready and then calls it. This indirect
+ // func is only used for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
+ wg.Wait()
+ f(e, v, quoted)
+ }
+ encoderCache.Unlock()
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
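
Values whose types implement Marshaler or encoding.TextMarshaler are routed to the encoders above before any kind-based dispatch. A hedged sketch using the standard library (Color and Tag are made-up types) shows the effect:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // Color implements json.Marshaler, so the marshalerEncoder path applies
    // and its output is copied into the buffer after a validity check.
    type Color struct{ R, G, B uint8 }

    func (c Color) MarshalJSON() ([]byte, error) {
        return []byte(fmt.Sprintf(`"#%02x%02x%02x"`, c.R, c.G, c.B)), nil
    }

    // Tag implements encoding.TextMarshaler; its text is emitted as a JSON string.
    type Tag string

    func (t Tag) MarshalText() ([]byte, error) {
        return []byte(strings.ToLower(string(t))), nil
    }

    func main() {
        b, err := json.Marshal(map[string]interface{}{
            "color": Color{R: 255, G: 0, B: 0},
            "tag":   Tag("URGENT"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"color":"#ff0000","tag":"urgent"}
    }
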
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Type() == numberType {
+ numStr := v.String()
+ // For compatibility with Go 1.5, an empty Number encodes as "0" even though
+ // the empty string is not a valid number literal; validity is checked after the substitution.
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ if !isValidNumber(numStr) {
+ e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+}
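
The quoted parameter threaded through the scalar encoders corresponds to the ",string" struct-tag option, which wraps numbers, booleans, and strings in JSON quotes. A minimal sketch with encoding/json (Account is illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // The ",string" option asks the scalar encoders to quote the value.
    type Account struct {
        ID      int64   `json:"id,string"`
        Balance float64 `json:"balance,string"`
    }

    func main() {
        b, err := json.Marshal(Account{ID: 42, Balance: 19.5})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"id":"42","balance":"19.5"}

        var a Account
        if err := json.Unmarshal([]byte(`{"id":"7","balance":"3.25"}`), &a); err != nil {
            panic(err)
        }
        fmt.Println(a.ID, a.Balance) // 7 3.25
    }
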
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name)
+ e.WriteByte(':')
+ se.fieldEncs[i](e, fv, f.quoted)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+ var sv stringValues = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(k), false)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
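
encodeByteSlice means that []byte values come out as base64 strings rather than arrays of numbers; a short sketch with encoding/json, whose behavior this fork matches:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Blob struct {
        Data []byte `json:"data"`
    }

    func main() {
        b, err := json.Marshal(Blob{Data: []byte("hello")})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"data":"aGVsbG8="} (base64 of "hello")
    }
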
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), false)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), quoted)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, quoted)
+ } else {
+ ce.elseEnc(e, v, quoted)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r,
+ // as well as <, > and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r,
+ // as well as <, >, and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Only strings, floats, integers, and booleans can be quoted.
+ quoted := false
+ if opts.Contains("string") {
+ switch ft.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ quoted = true
+ }
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: quoted,
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
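
typeFields and dominantField together decide which field wins when embedded structs collide on a JSON name: shallower fields beat deeper ones, and a JSON tag beats an untagged field at the same depth. A sketch with encoding/json (Base, Extra, Outer are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Base struct {
        ID   int    `json:"id"`
        Name string `json:"name"`
    }

    type Extra struct {
        Name string `json:"name"`
    }

    // Both embedded structs expose "name" at depth 1, but the field declared
    // directly on Outer is shallower, so dominantField picks it.
    type Outer struct {
        Base
        Extra
        Name string `json:"name"`
    }

    func main() {
        b, err := json.Marshal(Outer{
            Base:  Base{ID: 1, Name: "base"},
            Extra: Extra{Name: "extra"},
            Name:  "outer",
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"id":1,"name":"outer"}
    }
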
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/indent.go b/vendor/gopkg.in/square/go-jose.v2/json/indent.go
new file mode 100644
index 000000000..7cd9f4db1
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(&scan, c)
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
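
A minimal usage sketch for Compact, written against the standard library's identical API:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        src := []byte("{\n  \"a\": 1,\n  \"b\": [1, 2, 3]\n}")
        var dst bytes.Buffer
        // Compact strips insignificant whitespace while validating the input.
        if err := json.Compact(&dst, src); err != nil {
            panic(err)
        }
        fmt.Println(dst.String()) // {"a":1,"b":[1,2,3]}
    }
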
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, c)
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
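
And the corresponding sketch for Indent, again using the standard library's identical signature:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        src := []byte(`{"a":1,"b":[1,2]}`)
        var dst bytes.Buffer
        // Empty prefix, two-space indent per nesting level.
        if err := json.Indent(&dst, src, "", "  "); err != nil {
            panic(err)
        }
        fmt.Println(dst.String())
        // {
        //   "a": 1,
        //   "b": [
        //     1,
        //     2
        //   ]
        // }
    }
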
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/scanner.go b/vendor/gopkg.in/square/go-jose.v2/json/scanner.go
new file mode 100644
index 000000000..ee6622e8c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/scanner.go
@@ -0,0 +1,623 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, c)
+ if v >= scanEndObject {
+ switch v {
+ // probe the scanner with a space to determine whether we will
+ // get scanEnd on the next character. Otherwise, if the next character
+ // is not a space, scanEndTop allocates a needless error.
+ case scanEndObject, scanEndArray:
+ if scan.step(scan, ' ') == scanEnd {
+ return data[:i+1], data[i+1:], nil
+ }
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, byte) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*scanner, byte) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+ s.redo = false
+ s.step = s.redoState
+ return s.redoCode
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/stream.go b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
new file mode 100644
index 000000000..8ddcf4d27
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
@@ -0,0 +1,480 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scanp int // start of unread data in buf
+ scan scanner
+ err error
+
+ tokenState int
+ tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ if err := dec.tokenPrepareForDecode(); err != nil {
+ return err
+ }
+
+ if !dec.tokenValueAllowed() {
+ return &SyntaxError{msg: "not at beginning of value"}
+ }
+
+ // Read whole value into buffer.
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+ dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+ dec.scanp += n
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ err = dec.d.unmarshal(v)
+
+ // fixup token streaming state
+ dec.tokenValueEnd()
+
+ return err
+}
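
Because the Decoder buffers its input and reads one value at a time, it can consume a stream of concatenated JSON values; UseNumber keeps large integers exact. A sketch against the standard library's matching API:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        dec := json.NewDecoder(strings.NewReader(`{"n": 1} {"n": 2} {"n": 9007199254740993}`))
        dec.UseNumber() // numbers decode as json.Number, not float64
        for {
            var v map[string]interface{}
            if err := dec.Decode(&v); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Println(v["n"]) // 1, then 2, then 9007199254740993 (no float rounding)
        }
    }
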
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := dec.scanp
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, c)
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ n := scanp - dec.scanp
+ err = dec.refill()
+ scanp = dec.scanp + n
+ }
+ return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if dec.scanp > 0 {
+ n := copy(dec.buf, dec.buf[dec.scanp:])
+ dec.buf = dec.buf[:n]
+ dec.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+ return err
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ if _, err = enc.w.Write(e.Bytes()); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
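
Encode's newline termination makes the Encoder convenient for line-delimited JSON output; a short sketch using the standard library's matching API:

    package main

    import (
        "encoding/json"
        "os"
    )

    func main() {
        enc := json.NewEncoder(os.Stdout)
        // Each Encode call writes one JSON value followed by a newline.
        if err := enc.Encode(map[string]int{"a": 1}); err != nil {
            panic(err)
        }
        if err := enc.Encode([]string{"x", "y"}); err != nil {
            panic(err)
        }
        // Output:
        // {"a":1}
        // ["x","y"]
    }
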
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
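
RawMessage is most often used to defer decoding of part of a document until surrounding fields have been inspected; a sketch with encoding/json (Envelope is illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Envelope keeps Payload as raw bytes so decoding can be delayed
    // until Kind has been examined.
    type Envelope struct {
        Kind    string          `json:"kind"`
        Payload json.RawMessage `json:"payload"`
    }

    func main() {
        data := []byte(`{"kind":"point","payload":{"x":1,"y":2}}`)
        var env Envelope
        if err := json.Unmarshal(data, &env); err != nil {
            panic(err)
        }
        if env.Kind == "point" {
            var pt struct{ X, Y int }
            if err := json.Unmarshal(env.Payload, &pt); err != nil {
                panic(err)
            }
            fmt.Println(pt.X, pt.Y) // 1 2
        }
    }
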
+
+// A Token holds a value of one of these types:
+//
+// Delim, for the four JSON delimiters [ ] { }
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+//
+type Token interface{}
+
+const (
+ tokenTopValue = iota
+ tokenArrayStart
+ tokenArrayValue
+ tokenArrayComma
+ tokenObjectStart
+ tokenObjectKey
+ tokenObjectColon
+ tokenObjectValue
+ tokenObjectComma
+)
+
+// advance tokenstate from a separator state to a value state
+func (dec *Decoder) tokenPrepareForDecode() error {
+ // Note: Not calling peek before switch, to avoid
+ // putting peek into the standard Decode path.
+ // peek is only called when using the Token API.
+ switch dec.tokenState {
+ case tokenArrayComma:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ',' {
+ return &SyntaxError{"expected comma after array element", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ case tokenObjectColon:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ':' {
+ return &SyntaxError{"expected colon after object key", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ }
+ return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+ switch dec.tokenState {
+ case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ return true
+ }
+ return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+ switch dec.tokenState {
+ case tokenArrayStart, tokenArrayValue:
+ dec.tokenState = tokenArrayComma
+ case tokenObjectValue:
+ dec.tokenState = tokenObjectComma
+ }
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+ return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+ for {
+ c, err := dec.peek()
+ if err != nil {
+ return nil, err
+ }
+ switch c {
+ case '[':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenArrayStart
+ return Delim('['), nil
+
+ case ']':
+ if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim(']'), nil
+
+ case '{':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenObjectStart
+ return Delim('{'), nil
+
+ case '}':
+ if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim('}'), nil
+
+ case ':':
+ if dec.tokenState != tokenObjectColon {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ continue
+
+ case ',':
+ if dec.tokenState == tokenArrayComma {
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ continue
+ }
+ if dec.tokenState == tokenObjectComma {
+ dec.scanp++
+ dec.tokenState = tokenObjectKey
+ continue
+ }
+ return dec.tokenError(c)
+
+ case '"':
+ if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+ var x string
+ old := dec.tokenState
+ dec.tokenState = tokenTopValue
+ err := dec.Decode(&x)
+ dec.tokenState = old
+ if err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ dec.tokenState = tokenObjectColon
+ return x, nil
+ }
+ fallthrough
+
+ default:
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ return x, nil
+ }
+ }
+}
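
Token pairs naturally with More for incremental decoding of large arrays: read the opening delimiter, then Decode elements while More reports true. A sketch using the standard library's matching API:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    func main() {
        dec := json.NewDecoder(strings.NewReader(`[{"name":"a"},{"name":"b"}]`))
        if _, err := dec.Token(); err != nil { // opening '['
            panic(err)
        }
        for dec.More() {
            var elem struct {
                Name string `json:"name"`
            }
            if err := dec.Decode(&elem); err != nil {
                panic(err)
            }
            fmt.Println(elem.Name) // a, then b
        }
        if _, err := dec.Token(); err != nil { // closing ']'
            panic(err)
        }
    }
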
+
+func clearOffset(err error) {
+ if s, ok := err.(*SyntaxError); ok {
+ s.Offset = 0
+ }
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+ var context string
+ switch dec.tokenState {
+ case tokenTopValue:
+ context = " looking for beginning of value"
+ case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ context = " looking for beginning of value"
+ case tokenArrayComma:
+ context = " after array element"
+ case tokenObjectKey:
+ context = " looking for beginning of object key string"
+ case tokenObjectColon:
+ context = " after object key"
+ case tokenObjectComma:
+ context = " after object key:value pair"
+ }
+ return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+ c, err := dec.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := dec.scanp; i < len(dec.buf); i++ {
+ c := dec.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ dec.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ return 0, err
+ }
+ err = dec.refill()
+ }
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error {
+ ...
+}
+
+*/
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/tags.go b/vendor/gopkg.in/square/go-jose.v2/json/tags.go
new file mode 100644
index 000000000..c38fd5102
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the given option name. The name must be bounded by the start
+// or end of the string, or by commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwe.go b/vendor/gopkg.in/square/go-jose.v2/jwe.go
new file mode 100644
index 000000000..b5a6dcdf4
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwe.go
@@ -0,0 +1,294 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
+type rawJSONWebEncryption struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Unprotected *rawHeader `json:"unprotected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Recipients []rawRecipientInfo `json:"recipients,omitempty"`
+ Aad *byteBuffer `json:"aad,omitempty"`
+ EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
+ Iv *byteBuffer `json:"iv,omitempty"`
+ Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
+ Tag *byteBuffer `json:"tag,omitempty"`
+}
+
+// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
+type rawRecipientInfo struct {
+ Header *rawHeader `json:"header,omitempty"`
+ EncryptedKey string `json:"encrypted_key,omitempty"`
+}
+
+// JSONWebEncryption represents an encrypted JWE object after parsing.
+type JSONWebEncryption struct {
+ Header Header
+ protected, unprotected *rawHeader
+ recipients []recipientInfo
+ aad, iv, ciphertext, tag []byte
+ original *rawJSONWebEncryption
+}
+
+// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
+type recipientInfo struct {
+ header *rawHeader
+ encryptedKey []byte
+}
+
+// GetAuthData retrieves the (optional) authenticated data attached to the object.
+func (obj JSONWebEncryption) GetAuthData() []byte {
+ if obj.aad != nil {
+ out := make([]byte, len(obj.aad))
+ copy(out, obj.aad)
+ return out
+ }
+
+ return nil
+}
+
+// Get the merged header values
+func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
+ out := rawHeader{}
+ out.merge(obj.protected)
+ out.merge(obj.unprotected)
+
+ if recipient != nil {
+ out.merge(recipient.header)
+ }
+
+ return out
+}
+
+// Get the additional authenticated data from a JWE object.
+func (obj JSONWebEncryption) computeAuthData() []byte {
+ var protected string
+
+ if obj.original != nil && obj.original.Protected != nil {
+ protected = obj.original.Protected.base64()
+ } else if obj.protected != nil {
+ protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.protected))
+ } else {
+ protected = ""
+ }
+
+ output := []byte(protected)
+ if obj.aad != nil {
+ output = append(output, '.')
+ output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
+ }
+
+ return output
+}
+
+// ParseEncrypted parses an encrypted message in compact or full serialization format.
+func ParseEncrypted(input string) (*JSONWebEncryption, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseEncryptedFull(input)
+ }
+
+ return parseEncryptedCompact(input)
+}
+
+// parseEncryptedFull parses a message in full JSON serialization format.
+func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
+ var parsed rawJSONWebEncryption
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWE object from the raw JSON.
+func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{
+ original: parsed,
+ unprotected: parsed.Unprotected,
+ }
+
+ // Check that there is not a nonce in the unprotected headers
+ if parsed.Unprotected != nil {
+ if nonce := parsed.Unprotected.getNonce(); nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+ }
+ if parsed.Header != nil {
+ if nonce := parsed.Header.getNonce(); nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+ }
+
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
+ }
+ }
+
+ // Note: this must be called _after_ we parse the protected header,
+ // otherwise fields from the protected header will not get picked up.
+ var err error
+ mergedHeaders := obj.mergedHeaders(nil)
+ obj.Header, err = mergedHeaders.sanitized()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
+ }
+
+ if len(parsed.Recipients) == 0 {
+ obj.recipients = []recipientInfo{
+ {
+ header: parsed.Header,
+ encryptedKey: parsed.EncryptedKey.bytes(),
+ },
+ }
+ } else {
+ obj.recipients = make([]recipientInfo, len(parsed.Recipients))
+ for r := range parsed.Recipients {
+ encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ obj.recipients[r].header = parsed.Recipients[r].Header
+ obj.recipients[r].encryptedKey = encryptedKey
+ }
+ }
+
+ for _, recipient := range obj.recipients {
+ headers := obj.mergedHeaders(&recipient)
+ if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
+ return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
+ }
+ }
+
+ obj.iv = parsed.Iv.bytes()
+ obj.ciphertext = parsed.Ciphertext.bytes()
+ obj.tag = parsed.Tag.bytes()
+ obj.aad = parsed.Aad.bytes()
+
+ return obj, nil
+}
+
+// parseEncryptedCompact parses a message in compact format.
+func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 5 {
+ return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
+ }
+
+ rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ iv, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3])
+ if err != nil {
+ return nil, err
+ }
+
+ tag, err := base64.RawURLEncoding.DecodeString(parts[4])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJSONWebEncryption{
+ Protected: newBuffer(rawProtected),
+ EncryptedKey: newBuffer(encryptedKey),
+ Iv: newBuffer(iv),
+ Ciphertext: newBuffer(ciphertext),
+ Tag: newBuffer(tag),
+ }
+
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JSONWebEncryption) CompactSerialize() (string, error) {
+ if len(obj.recipients) != 1 || obj.unprotected != nil ||
+ obj.protected == nil || obj.recipients[0].header != nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s.%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
+ base64.RawURLEncoding.EncodeToString(obj.iv),
+ base64.RawURLEncoding.EncodeToString(obj.ciphertext),
+ base64.RawURLEncoding.EncodeToString(obj.tag)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JSONWebEncryption) FullSerialize() string {
+ raw := rawJSONWebEncryption{
+ Unprotected: obj.unprotected,
+ Iv: newBuffer(obj.iv),
+ Ciphertext: newBuffer(obj.ciphertext),
+ EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
+ Tag: newBuffer(obj.tag),
+ Aad: newBuffer(obj.aad),
+ Recipients: []rawRecipientInfo{},
+ }
+
+ if len(obj.recipients) > 1 {
+ for _, recipient := range obj.recipients {
+ info := rawRecipientInfo{
+ Header: recipient.header,
+ EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
+ }
+ raw.Recipients = append(raw.Recipients, info)
+ }
+ } else {
+ // Use flattened serialization
+ raw.Header = obj.recipients[0].header
+ raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
+ }
+
+ if obj.protected != nil {
+ raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
+ }
+
+ return string(mustSerializeJSON(raw))
+}
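
As a usage sketch, the parsing and serialization entry points above pair with the package's encrypter API (NewEncrypter, Recipient, and Decrypt live in other files of go-jose, not in this hunk). Assuming an RSA recipient:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// NewEncrypter and Decrypt are defined elsewhere in the package.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		panic(err)
	}

	compact, err := obj.CompactSerialize() // five dot-separated parts
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact) // round-trips through parseEncryptedCompact
	if err != nil {
		panic(err)
	}

	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // "hello"
}
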
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwk.go b/vendor/gopkg.in/square/go-jose.v2/jwk.go
new file mode 100644
index 000000000..6cb8adb84
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwk.go
@@ -0,0 +1,608 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "golang.org/x/crypto/ed25519"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
+type rawJSONWebKey struct {
+ Use string `json:"use,omitempty"`
+ Kty string `json:"kty,omitempty"`
+ Kid string `json:"kid,omitempty"`
+ Crv string `json:"crv,omitempty"`
+ Alg string `json:"alg,omitempty"`
+ K *byteBuffer `json:"k,omitempty"`
+ X *byteBuffer `json:"x,omitempty"`
+ Y *byteBuffer `json:"y,omitempty"`
+ N *byteBuffer `json:"n,omitempty"`
+ E *byteBuffer `json:"e,omitempty"`
+ // -- Following fields are only used for private keys --
+ // RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
+ // completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
+ // we have a private key whereas D == nil means we have only a public key.
+ D *byteBuffer `json:"d,omitempty"`
+ P *byteBuffer `json:"p,omitempty"`
+ Q *byteBuffer `json:"q,omitempty"`
+ Dp *byteBuffer `json:"dp,omitempty"`
+ Dq *byteBuffer `json:"dq,omitempty"`
+ Qi *byteBuffer `json:"qi,omitempty"`
+ // Certificates
+ X5c []string `json:"x5c,omitempty"`
+}
+
+// JSONWebKey represents a public or private key in JWK format.
+type JSONWebKey struct {
+ Key interface{}
+ Certificates []*x509.Certificate
+ KeyID string
+ Algorithm string
+ Use string
+}
+
+// MarshalJSON serializes the given key to its JSON representation.
+func (k JSONWebKey) MarshalJSON() ([]byte, error) {
+ var raw *rawJSONWebKey
+ var err error
+
+ switch key := k.Key.(type) {
+ case ed25519.PublicKey:
+ raw = fromEdPublicKey(key)
+ case *ecdsa.PublicKey:
+ raw, err = fromEcPublicKey(key)
+ case *rsa.PublicKey:
+ raw = fromRsaPublicKey(key)
+ case ed25519.PrivateKey:
+ raw, err = fromEdPrivateKey(key)
+ case *ecdsa.PrivateKey:
+ raw, err = fromEcPrivateKey(key)
+ case *rsa.PrivateKey:
+ raw, err = fromRsaPrivateKey(key)
+ case []byte:
+ raw, err = fromSymmetricKey(key)
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ raw.Kid = k.KeyID
+ raw.Alg = k.Algorithm
+ raw.Use = k.Use
+
+ for _, cert := range k.Certificates {
+ raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
+ }
+
+ return json.Marshal(raw)
+}
+
+// UnmarshalJSON reads a key from its JSON representation.
+func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
+ var raw rawJSONWebKey
+ err = json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ var key interface{}
+ switch raw.Kty {
+ case "EC":
+ if raw.D != nil {
+ key, err = raw.ecPrivateKey()
+ } else {
+ key, err = raw.ecPublicKey()
+ }
+ case "RSA":
+ if raw.D != nil {
+ key, err = raw.rsaPrivateKey()
+ } else {
+ key, err = raw.rsaPublicKey()
+ }
+ case "oct":
+ key, err = raw.symmetricKey()
+ case "OKP":
+ if raw.Crv == "Ed25519" && raw.X != nil {
+ if raw.D != nil {
+ key, err = raw.edPrivateKey()
+ } else {
+ key, err = raw.edPublicKey()
+ }
+ } else {
+ err = fmt.Errorf("square/go-jose: unknown curve %s'", raw.Crv)
+ }
+ default:
+ err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
+ }
+
+ if err == nil {
+ *k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
+
+ k.Certificates, err = parseCertificateChain(raw.X5c)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal x5c field: %s", err)
+ }
+ }
+
+ return
+}
+
+// JSONWebKeySet represents a JWK Set object.
+type JSONWebKeySet struct {
+ Keys []JSONWebKey `json:"keys"`
+}
+
+// Key is a convenience method that returns keys by key ID. The
+// specification states that a JWK Set "SHOULD" use distinct key IDs,
+// but allows for some cases where they are not distinct. Hence this
+// method returns a slice of JSONWebKeys.
+func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
+ var keys []JSONWebKey
+ for _, key := range s.Keys {
+ if key.KeyID == kid {
+ keys = append(keys, key)
+ }
+ }
+
+ return keys
+}
+
+const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
+const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
+const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
+
+func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
+ coordLength := curveSize(curve)
+ crv, err := curveName(curve)
+ if err != nil {
+ return "", err
+ }
+
+ if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
+ return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+ }
+
+ return fmt.Sprintf(ecThumbprintTemplate, crv,
+ newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
+ newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
+}
+
+func rsaThumbprintInput(n *big.Int, e int) (string, error) {
+ return fmt.Sprintf(rsaThumbprintTemplate,
+ newBufferFromInt(uint64(e)).base64(),
+ newBuffer(n.Bytes()).base64()), nil
+}
+
+func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
+ crv := "Ed25519"
+ if len(ed) > 32 {
+ return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+ }
+ return fmt.Sprintf(edThumbprintTemplate, crv,
+ newFixedSizeBuffer(ed, 32).base64()), nil
+}
+
+// Thumbprint computes the JWK Thumbprint of a key using the
+// indicated hash algorithm.
+func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ var input string
+ var err error
+ switch key := k.Key.(type) {
+ case ed25519.PublicKey:
+ input, err = edThumbprintInput(key)
+ case *ecdsa.PublicKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *ecdsa.PrivateKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *rsa.PublicKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ case *rsa.PrivateKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ case ed25519.PrivateKey:
+ input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ h := hash.New()
+ h.Write([]byte(input))
+ return h.Sum(nil), nil
+}
+
+// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
+func (k *JSONWebKey) IsPublic() bool {
+ switch k.Key.(type) {
+ case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
+ return true
+ default:
+ return false
+ }
+}
+
+// Public creates a JSONWebKey with the corresponding public key if the JWK represents an asymmetric private key.
+func (k *JSONWebKey) Public() JSONWebKey {
+ if k.IsPublic() {
+ return *k
+ }
+ ret := *k
+ switch key := k.Key.(type) {
+ case *ecdsa.PrivateKey:
+ ret.Key = key.Public()
+ case *rsa.PrivateKey:
+ ret.Key = key.Public()
+ case ed25519.PrivateKey:
+ ret.Key = key.Public()
+ default:
+ return JSONWebKey{} // returning invalid key
+ }
+ return ret
+}
+
+// Valid checks that the key contains the expected parameters.
+func (k *JSONWebKey) Valid() bool {
+ if k.Key == nil {
+ return false
+ }
+ switch key := k.Key.(type) {
+ case *ecdsa.PublicKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil {
+ return false
+ }
+ case *ecdsa.PrivateKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
+ return false
+ }
+ case *rsa.PublicKey:
+ if key.N == nil || key.E == 0 {
+ return false
+ }
+ case *rsa.PrivateKey:
+ if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
+ return false
+ }
+ case ed25519.PublicKey:
+ if len(key) != 32 {
+ return false
+ }
+ case ed25519.PrivateKey:
+ if len(key) != 64 {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
+ if key.N == nil || key.E == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
+ }
+
+ return &rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ }, nil
+}
+
+func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
+ return &rawJSONWebKey{
+ Kty: "OKP",
+ Crv: "Ed25519",
+ X: newBuffer(pub),
+ }
+}
+
+func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
+ return &rawJSONWebKey{
+ Kty: "RSA",
+ N: newBuffer(pub.N.Bytes()),
+ E: newBufferFromInt(uint64(pub.E)),
+ }
+}
+
+func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil {
+ return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
+ }
+
+ // The length of this octet string MUST be the full size of a coordinate for
+ // the curve specified in the "crv" parameter.
+ // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
+ if curveSize(curve) != len(key.X.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x")
+ }
+
+ if curveSize(curve) != len(key.Y.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ }, nil
+}
+
+func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
+ if pub == nil || pub.X == nil || pub.Y == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
+ }
+
+ name, err := curveName(pub.Curve)
+ if err != nil {
+ return nil, err
+ }
+
+ size := curveSize(pub.Curve)
+
+ xBytes := pub.X.Bytes()
+ yBytes := pub.Y.Bytes()
+
+ if len(xBytes) > size || len(yBytes) > size {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
+ }
+
+ key := &rawJSONWebKey{
+ Kty: "EC",
+ Crv: name,
+ X: newFixedSizeBuffer(xBytes, size),
+ Y: newFixedSizeBuffer(yBytes, size),
+ }
+
+ return key, nil
+}
+
+func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
+ var missing []string
+ switch {
+ case key.D == nil:
+ missing = append(missing, "D")
+ case key.X == nil:
+ missing = append(missing, "X")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
+ }
+
+ privateKey := make([]byte, ed25519.PrivateKeySize)
+ copy(privateKey[0:32], key.D.bytes())
+ copy(privateKey[32:], key.X.bytes())
+ rv := ed25519.PrivateKey(privateKey)
+ return rv, nil
+}
+
+func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
+ if key.X == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value")
+ }
+ publicKey := make([]byte, ed25519.PublicKeySize)
+ copy(publicKey[0:32], key.X.bytes())
+ rv := ed25519.PublicKey(publicKey)
+ return rv, nil
+}
+
+func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
+ var missing []string
+ switch {
+ case key.N == nil:
+ missing = append(missing, "N")
+ case key.E == nil:
+ missing = append(missing, "E")
+ case key.D == nil:
+ missing = append(missing, "D")
+ case key.P == nil:
+ missing = append(missing, "P")
+ case key.Q == nil:
+ missing = append(missing, "Q")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
+ }
+
+ rv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ },
+ D: key.D.bigInt(),
+ Primes: []*big.Int{
+ key.P.bigInt(),
+ key.Q.bigInt(),
+ },
+ }
+
+ if key.Dp != nil {
+ rv.Precomputed.Dp = key.Dp.bigInt()
+ }
+ if key.Dq != nil {
+ rv.Precomputed.Dq = key.Dq.bigInt()
+ }
+ if key.Qi != nil {
+ rv.Precomputed.Qinv = key.Qi.bigInt()
+ }
+
+ err := rv.Validate()
+ return rv, err
+}
+
+func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
+ raw := fromEdPublicKey(ed25519.PublicKey(ed[32:]))
+
+ raw.D = newBuffer(ed[0:32])
+ return raw, nil
+}
+
+func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
+ if len(rsa.Primes) != 2 {
+ return nil, ErrUnsupportedKeyType
+ }
+
+ raw := fromRsaPublicKey(&rsa.PublicKey)
+
+ raw.D = newBuffer(rsa.D.Bytes())
+ raw.P = newBuffer(rsa.Primes[0].Bytes())
+ raw.Q = newBuffer(rsa.Primes[1].Bytes())
+
+ if rsa.Precomputed.Dp != nil {
+ raw.Dp = newBuffer(rsa.Precomputed.Dp.Bytes())
+ }
+ if rsa.Precomputed.Dq != nil {
+ raw.Dq = newBuffer(rsa.Precomputed.Dq.Bytes())
+ }
+ if rsa.Precomputed.Qinv != nil {
+ raw.Qi = newBuffer(rsa.Precomputed.Qinv.Bytes())
+ }
+
+ return raw, nil
+}
+
+func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil || key.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
+ }
+
+ // The length of this octet string MUST be the full size of a coordinate for
+ // the curve specified in the "crv" parameter.
+ // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
+ if curveSize(curve) != len(key.X.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x")
+ }
+
+ if curveSize(curve) != len(key.Y.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y")
+ }
+
+ // https://tools.ietf.org/html/rfc7518#section-6.2.2.1
+ if dSize(curve) != len(key.D.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for d")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ },
+ D: key.D.bigInt(),
+ }, nil
+}
+
+func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
+ raw, err := fromEcPublicKey(&ec.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if ec.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key")
+ }
+
+ raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))
+
+ return raw, nil
+}
+
+// dSize returns the size in octets for the "d" member of an elliptic curve
+// private key.
+// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+// octets (where n is the order of the curve).
+// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size = size + 1
+ }
+ return size
+}
+
+func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
+ return &rawJSONWebKey{
+ Kty: "oct",
+ K: newBuffer(key),
+ }, nil
+}
+
+func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
+ if key.K == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
+ }
+ return key.K.bytes(), nil
+}
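
A short sketch of how the JWK marshaling and thumbprint code above is typically exercised; the key ID, algorithm, and use values are illustrative:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	_ "crypto/sha256" // register SHA-256 so Thumbprint(crypto.SHA256) can hash
	"encoding/json"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Wrap the key; KeyID, Algorithm, and Use are illustrative values.
	jwk := jose.JSONWebKey{Key: priv, KeyID: "example-key", Algorithm: "ES256", Use: "sig"}

	data, err := json.Marshal(jwk) // goes through MarshalJSON above, kty "EC" with d/x/y
	if err != nil {
		panic(err)
	}

	var parsed jose.JSONWebKey
	if err := json.Unmarshal(data, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid(), parsed.IsPublic()) // true false (private EC key)

	pub := parsed.Public() // strips the private part
	tp, err := pub.Thumbprint(crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Printf("JWK thumbprint: %x\n", tp)
}
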
diff --git a/vendor/gopkg.in/square/go-jose.v2/jws.go b/vendor/gopkg.in/square/go-jose.v2/jws.go
new file mode 100644
index 000000000..8b59b6ab2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jws.go
@@ -0,0 +1,321 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
+type rawJSONWebSignature struct {
+ Payload *byteBuffer `json:"payload,omitempty"`
+ Signatures []rawSignatureInfo `json:"signatures,omitempty"`
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
+type rawSignatureInfo struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// JSONWebSignature represents a signed JWS object after parsing.
+type JSONWebSignature struct {
+ payload []byte
+ // Signatures attached to this object (may be more than one for multi-sig).
+ // Be careful about accessing these directly, prefer to use Verify() or
+ // VerifyMulti() to ensure that the data you're getting is verified.
+ Signatures []Signature
+}
+
+// Signature represents a single signature over the JWS payload and protected header.
+type Signature struct {
+ // Merged header fields. Contains both protected and unprotected header
+ // values. Prefer using Protected and Unprotected fields instead of this.
+ // Values in this header may or may not have been signed and in general
+ // should not be trusted.
+ Header Header
+
+ // Protected header. Values in this header were signed and
+ // will be verified as part of the signature verification process.
+ Protected Header
+
+ // Unprotected header. Values in this header were not signed
+ // and in general should not be trusted.
+ Unprotected Header
+
+ // The actual signature value
+ Signature []byte
+
+ protected *rawHeader
+ header *rawHeader
+ original *rawSignatureInfo
+}
+
+// ParseSigned parses a signed message in compact or full serialization format.
+func ParseSigned(input string) (*JSONWebSignature, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseSignedFull(input)
+ }
+
+ return parseSignedCompact(input)
+}
+
+// Get a header value
+func (sig Signature) mergedHeaders() rawHeader {
+ out := rawHeader{}
+ out.merge(sig.protected)
+ out.merge(sig.header)
+ return out
+}
+
+// Compute data to be signed
+func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) []byte {
+ var serializedProtected string
+
+ if signature.original != nil && signature.original.Protected != nil {
+ serializedProtected = signature.original.Protected.base64()
+ } else if signature.protected != nil {
+ serializedProtected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(signature.protected))
+ } else {
+ serializedProtected = ""
+ }
+
+ return []byte(fmt.Sprintf("%s.%s",
+ serializedProtected,
+ base64.RawURLEncoding.EncodeToString(payload)))
+}
+
+// parseSignedFull parses a message in full format.
+func parseSignedFull(input string) (*JSONWebSignature, error) {
+ var parsed rawJSONWebSignature
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWS object from the raw JSON.
+func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
+ if parsed.Payload == nil {
+ return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
+ }
+
+ obj := &JSONWebSignature{
+ payload: parsed.Payload.bytes(),
+ Signatures: make([]Signature, len(parsed.Signatures)),
+ }
+
+ if len(parsed.Signatures) == 0 {
+ // No signatures array, must be flattened serialization
+ signature := Signature{}
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ signature.protected = &rawHeader{}
+ err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Header != nil && parsed.Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ signature.header = parsed.Header
+ signature.Signature = parsed.Signature.bytes()
+ // Make a fake "original" rawSignatureInfo to store the unprocessed
+ // Protected header. This is necessary because the Protected header can
+ // contain arbitrary fields not registered as part of the spec. See
+ // https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
+ // If we unmarshal Protected into a rawHeader with its explicit list of fields,
+ // we cannot marshal losslessly. So we have to keep around the original bytes.
+ // This is used in computeAuthData, which will first attempt to use
+ // the original bytes of a protected header, and fall back on marshaling the
+ // header struct only if those bytes are not available.
+ signature.original = &rawSignatureInfo{
+ Protected: parsed.Protected,
+ Header: parsed.Header,
+ Signature: parsed.Signature,
+ }
+
+ var err error
+ signature.Header, err = signature.mergedHeaders().sanitized()
+ if err != nil {
+ return nil, err
+ }
+
+ if signature.header != nil {
+ signature.Unprotected, err = signature.header.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if signature.protected != nil {
+ signature.Protected, err = signature.protected.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := signature.Header.JSONWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ obj.Signatures = append(obj.Signatures, signature)
+ }
+
+ for i, sig := range parsed.Signatures {
+ if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
+ obj.Signatures[i].protected = &rawHeader{}
+ err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if sig.Header != nil && sig.Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ var err error
+ obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
+ if err != nil {
+ return nil, err
+ }
+
+ if obj.Signatures[i].header != nil {
+ obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if obj.Signatures[i].protected != nil {
+ obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ obj.Signatures[i].Signature = sig.Signature.bytes()
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := obj.Signatures[i].Header.JSONWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ // Copy value of sig
+ original := sig
+
+ obj.Signatures[i].header = sig.Header
+ obj.Signatures[i].original = &original
+ }
+
+ return obj, nil
+}
+
+// parseSignedCompact parses a message in compact format.
+func parseSignedCompact(input string) (*JSONWebSignature, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
+ }
+
+ rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJSONWebSignature{
+ Payload: newBuffer(payload),
+ Protected: newBuffer(rawProtected),
+ Signature: newBuffer(signature),
+ }
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JSONWebSignature) CompactSerialize() (string, error) {
+ if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(obj.payload),
+ base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JSONWebSignature) FullSerialize() string {
+ raw := rawJSONWebSignature{
+ Payload: newBuffer(obj.payload),
+ }
+
+ if len(obj.Signatures) == 1 {
+ if obj.Signatures[0].protected != nil {
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+ raw.Protected = newBuffer(serializedProtected)
+ }
+ raw.Header = obj.Signatures[0].header
+ raw.Signature = newBuffer(obj.Signatures[0].Signature)
+ } else {
+ raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
+ for i, signature := range obj.Signatures {
+ raw.Signatures[i] = rawSignatureInfo{
+ Header: signature.header,
+ Signature: newBuffer(signature.Signature),
+ }
+
+ if signature.protected != nil {
+ raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
+ }
+ }
+ }
+
+ return string(mustSerializeJSON(raw))
+}
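
A usage sketch tying the JWS parsing and serialization above to the signer and verifier defined later in the package (NewSigner, Sign, and Verify appear in signing.go below); the payload is illustrative:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: priv}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}

	compact, err := obj.CompactSerialize() // header.payload.signature
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact) // goes through parseSignedCompact above
	if err != nil {
		panic(err)
	}

	payload, err := parsed.Verify(&priv.PublicKey) // only trust the payload after Verify
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
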
diff --git a/vendor/gopkg.in/square/go-jose.v2/opaque.go b/vendor/gopkg.in/square/go-jose.v2/opaque.go
new file mode 100644
index 000000000..4a8bd8f32
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/opaque.go
@@ -0,0 +1,83 @@
+/*-
+ * Copyright 2018 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+// OpaqueSigner is an interface that supports signing payloads with opaque
+// private key(s). Private key operations performed by implementors may, for
+// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
+// transparently to the user of this interface.
+type OpaqueSigner interface {
+ // Public returns the public key of the current signing key.
+ Public() *JSONWebKey
+ // Algs returns a list of supported signing algorithms.
+ Algs() []SignatureAlgorithm
+ // SignPayload signs a payload with the current signing key using the given
+ // algorithm.
+ SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error)
+}
+
+type opaqueSigner struct {
+ signer OpaqueSigner
+}
+
+func newOpaqueSigner(alg SignatureAlgorithm, signer OpaqueSigner) (recipientSigInfo, error) {
+ var algSupported bool
+ for _, salg := range signer.Algs() {
+ if alg == salg {
+ algSupported = true
+ break
+ }
+ }
+ if !algSupported {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: alg,
+ publicKey: signer.Public,
+ signer: &opaqueSigner{
+ signer: signer,
+ },
+ }, nil
+}
+
+func (o *opaqueSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ out, err := o.signer.SignPayload(payload, alg)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// OpaqueVerifier is an interface that supports verifying payloads with opaque
+// public key(s). An OpaqueVerifier may rotate verification keys transparently
+// to the user of this interface.
+type OpaqueVerifier interface {
+ VerifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
+}
+
+type opaqueVerifier struct {
+ verifier OpaqueVerifier
+}
+
+func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ return o.verifier.VerifyPayload(payload, signature, alg)
+}
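
A minimal sketch of plugging a custom key holder into the OpaqueSigner interface above; the in-memory Ed25519-backed type below is hypothetical and stands in for, say, an HSM-backed signer:

package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
	jose "gopkg.in/square/go-jose.v2"
)

// hsmStandIn is a hypothetical OpaqueSigner holding an Ed25519 key in memory;
// a real implementation would delegate SignPayload to a hardware module.
type hsmStandIn struct {
	priv ed25519.PrivateKey
	pub  ed25519.PublicKey
}

func (s *hsmStandIn) Public() *jose.JSONWebKey {
	return &jose.JSONWebKey{Key: s.pub, KeyID: "hsm-demo", Algorithm: string(jose.EdDSA), Use: "sig"}
}

func (s *hsmStandIn) Algs() []jose.SignatureAlgorithm {
	return []jose.SignatureAlgorithm{jose.EdDSA}
}

func (s *hsmStandIn) SignPayload(payload []byte, alg jose.SignatureAlgorithm) ([]byte, error) {
	if alg != jose.EdDSA {
		return nil, jose.ErrUnsupportedAlgorithm
	}
	return ed25519.Sign(s.priv, payload), nil
}

func main() {
	pub, priv, err := ed25519.GenerateKey(nil) // nil means crypto/rand
	if err != nil {
		panic(err)
	}
	opaque := &hsmStandIn{priv: priv, pub: pub}

	// makeJWSRecipient (in signing.go below) detects OpaqueSigner keys,
	// so the opaque signer can be passed directly as the SigningKey key.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: opaque}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("opaque-signed payload"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	out, err := parsed.Verify(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
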
diff --git a/vendor/gopkg.in/square/go-jose.v2/shared.go b/vendor/gopkg.in/square/go-jose.v2/shared.go
new file mode 100644
index 000000000..b0a6255ec
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/shared.go
@@ -0,0 +1,499 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/elliptic"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// KeyAlgorithm represents a key management algorithm.
+type KeyAlgorithm string
+
+// SignatureAlgorithm represents a signature (or MAC) algorithm.
+type SignatureAlgorithm string
+
+// ContentEncryption represents a content encryption algorithm.
+type ContentEncryption string
+
+// CompressionAlgorithm represents an algorithm used for plaintext compression.
+type CompressionAlgorithm string
+
+// ContentType represents type of the contained data.
+type ContentType string
+
+var (
+ // ErrCryptoFailure represents an error in cryptographic primitive. This
+ // occurs when, for example, a message had an invalid authentication tag or
+ // could not be decrypted.
+ ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
+
+ // ErrUnsupportedAlgorithm indicates that a selected algorithm is not
+ // supported. This occurs when trying to instantiate an encrypter for an
+ // algorithm that is not yet implemented.
+ ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
+
+ // ErrUnsupportedKeyType indicates that the given key type/format is not
+ // supported. This occurs when trying to instantiate an encrypter and passing
+ // it a key of an unrecognized type or with unsupported parameters, such as
+ // an RSA private key with more than two primes.
+ ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
+
+ // ErrInvalidKeySize indicates that the given key is not the correct size
+ // for the selected algorithm. This can occur, for example, when trying to
+ // encrypt with AES-256 but passing only a 128-bit key as input.
+ ErrInvalidKeySize = errors.New("square/go-jose: invalid key size for algorithm")
+
+ // ErrNotSupported serialization of object is not supported. This occurs when
+ // trying to compact-serialize an object which can't be represented in
+ // compact form.
+ ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
+
+ // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
+ // nonce header parameter was included in an unprotected header object.
+ ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
+)
+
+// Key management algorithms
+const (
+ ED25519 = KeyAlgorithm("ED25519")
+ RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
+ RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
+ RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
+ A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
+ A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
+ A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
+ DIRECT = KeyAlgorithm("dir") // Direct encryption
+ ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
+ ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
+ ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
+ ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
+ A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
+ A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
+ A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
+ PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
+ PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
+ PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
+)
+
+// Signature algorithms
+const (
+ EdDSA = SignatureAlgorithm("EdDSA")
+ HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
+ HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
+ HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
+ RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
+ RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
+ RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
+ ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
+ ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
+ ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
+ PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
+ PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
+ PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
+)
+
+// Content encryption algorithms
+const (
+ A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
+ A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
+ A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
+ A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
+ A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
+ A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
+)
+
+// Compression algorithms
+const (
+ NONE = CompressionAlgorithm("") // No compression
+ DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
+)
+
+// HeaderKey represents a key in the protected header of a JWS object. Use of
+// the predefined Header* constants is preferred to enhance type safety.
+type HeaderKey string
+
+const (
+ HeaderType HeaderKey = "typ" // string
+ HeaderContentType = "cty" // string
+
+ // These are set by go-jose and shouldn't need to be set by consumers of the
+ // library.
+ headerAlgorithm = "alg" // string
+ headerEncryption = "enc" // ContentEncryption
+ headerCompression = "zip" // CompressionAlgorithm
+ headerCritical = "crit" // []string
+
+ headerAPU = "apu" // *byteBuffer
+ headerAPV = "apv" // *byteBuffer
+ headerEPK = "epk" // *JSONWebKey
+ headerIV = "iv" // *byteBuffer
+ headerTag = "tag" // *byteBuffer
+ headerX5c = "x5c" // []*x509.Certificate
+
+ headerJWK = "jwk" // *JSONWebKey
+ headerKeyID = "kid" // string
+ headerNonce = "nonce" // string
+
+ headerP2C = "p2c" // *byteBuffer (int)
+ headerP2S = "p2s" // *byteBuffer ([]byte)
+
+)
+
+// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
+//
+// The decoding of the constituent items is deferred because we want to marshal
+// some members into particular structs rather than generic maps, but at the
+// same time we need to receive any extra fields unhandled by this library to
+// pass through to consuming code in case it wants to examine them.
+type rawHeader map[HeaderKey]*json.RawMessage
+
+// Header represents the read-only JOSE header for JWE/JWS objects.
+type Header struct {
+ KeyID string
+ JSONWebKey *JSONWebKey
+ Algorithm string
+ Nonce string
+
+ // Unverified certificate chain parsed from x5c header.
+ certificates []*x509.Certificate
+
+ // Any headers not recognised above get unmarshaled
+ // from JSON in a generic manner and placed in this map.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// Certificates verifies & returns the certificate chain present
+// in the x5c header field of a message, if one was present. Returns
+// an error if there was no x5c header present or the chain could
+// not be validated with the given verify options.
+func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
+ if len(h.certificates) == 0 {
+ return nil, errors.New("square/go-jose: no x5c header present in message")
+ }
+
+ leaf := h.certificates[0]
+ if opts.Intermediates == nil {
+ opts.Intermediates = x509.NewCertPool()
+ for _, intermediate := range h.certificates[1:] {
+ opts.Intermediates.AddCert(intermediate)
+ }
+ }
+
+ return leaf.Verify(opts)
+}
+
+func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+
+ parsed[k] = makeRawMessage(b)
+ return nil
+}
+
+// getString gets a string from the raw JSON, defaulting to "".
+func (parsed rawHeader) getString(k HeaderKey) string {
+ v, ok := parsed[k]
+ if !ok || v == nil {
+ return ""
+ }
+ var s string
+ err := json.Unmarshal(*v, &s)
+ if err != nil {
+ return ""
+ }
+ return s
+}
+
+// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
+// not specified.
+func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
+ v := parsed[k]
+ if v == nil {
+ return nil, nil
+ }
+ var bb *byteBuffer
+ err := json.Unmarshal(*v, &bb)
+ if err != nil {
+ return nil, err
+ }
+ return bb, nil
+}
+
+// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
+func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
+ return KeyAlgorithm(parsed.getString(headerAlgorithm))
+}
+
+// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
+func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
+ return SignatureAlgorithm(parsed.getString(headerAlgorithm))
+}
+
+// getEncryption extracts parsed "enc" from the raw JSON.
+func (parsed rawHeader) getEncryption() ContentEncryption {
+ return ContentEncryption(parsed.getString(headerEncryption))
+}
+
+// getCompression extracts parsed "zip" from the raw JSON.
+func (parsed rawHeader) getCompression() CompressionAlgorithm {
+ return CompressionAlgorithm(parsed.getString(headerCompression))
+}
+
+func (parsed rawHeader) getNonce() string {
+ return parsed.getString(headerNonce)
+}
+
+// getEPK extracts parsed "epk" from the raw JSON.
+func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
+ v := parsed[headerEPK]
+ if v == nil {
+ return nil, nil
+ }
+ var epk *JSONWebKey
+ err := json.Unmarshal(*v, &epk)
+ if err != nil {
+ return nil, err
+ }
+ return epk, nil
+}
+
+// getAPU extracts parsed "apu" from the raw JSON.
+func (parsed rawHeader) getAPU() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerAPU)
+}
+
+// getAPV extracts parsed "apv" from the raw JSON.
+func (parsed rawHeader) getAPV() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerAPV)
+}
+
+// getIV extracts parsed "iv" from the raw JSON.
+func (parsed rawHeader) getIV() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerIV)
+}
+
+// getTag extracts parsed "tag" from the raw JSON.
+func (parsed rawHeader) getTag() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerTag)
+}
+
+// getJWK extracts parsed "jwk" from the raw JSON.
+func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
+ v := parsed[headerJWK]
+ if v == nil {
+ return nil, nil
+ }
+ var jwk *JSONWebKey
+ err := json.Unmarshal(*v, &jwk)
+ if err != nil {
+ return nil, err
+ }
+ return jwk, nil
+}
+
+// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
+// returns an empty slice.
+func (parsed rawHeader) getCritical() ([]string, error) {
+ v := parsed[headerCritical]
+ if v == nil {
+ return nil, nil
+ }
+
+ var q []string
+ err := json.Unmarshal(*v, &q)
+ if err != nil {
+ return nil, err
+ }
+ return q, nil
+}
+
+// getP2C extracts parsed "p2c" from the raw JSON.
+func (parsed rawHeader) getP2C() (int, error) {
+ v := parsed[headerP2C]
+ if v == nil {
+ return 0, nil
+ }
+
+ var p2c int
+ err := json.Unmarshal(*v, &p2c)
+ if err != nil {
+ return 0, err
+ }
+ return p2c, nil
+}
+
+// getP2S extracts parsed "p2s" from the raw JSON.
+func (parsed rawHeader) getP2S() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerP2S)
+}
+
+// sanitized produces a cleaned-up header object from the raw JSON.
+func (parsed rawHeader) sanitized() (h Header, err error) {
+ for k, v := range parsed {
+ if v == nil {
+ continue
+ }
+ switch k {
+ case headerJWK:
+ var jwk *JSONWebKey
+ err = json.Unmarshal(*v, &jwk)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
+ return
+ }
+ h.JSONWebKey = jwk
+ case headerKeyID:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
+ return
+ }
+ h.KeyID = s
+ case headerAlgorithm:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
+ return
+ }
+ h.Algorithm = s
+ case headerNonce:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
+ return
+ }
+ h.Nonce = s
+ case headerX5c:
+ c := []string{}
+ err = json.Unmarshal(*v, &c)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
+ return
+ }
+ h.certificates, err = parseCertificateChain(c)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
+ return
+ }
+ default:
+ if h.ExtraHeaders == nil {
+ h.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ var v2 interface{}
+ err = json.Unmarshal(*v, &v2)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
+ return
+ }
+ h.ExtraHeaders[k] = v2
+ }
+ }
+ return
+}
+
+func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
+ out := make([]*x509.Certificate, len(chain))
+ for i, cert := range chain {
+ raw, err := base64.StdEncoding.DecodeString(cert)
+ if err != nil {
+ return nil, err
+ }
+ out[i], err = x509.ParseCertificate(raw)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+func (dst rawHeader) isSet(k HeaderKey) bool {
+ dvr := dst[k]
+ if dvr == nil {
+ return false
+ }
+
+ var dv interface{}
+ err := json.Unmarshal(*dvr, &dv)
+ if err != nil {
+ return true
+ }
+
+ if dvStr, ok := dv.(string); ok {
+ return dvStr != ""
+ }
+
+ return true
+}
+
+// Merge headers from src into dst, giving precedence to headers already set in dst.
+func (dst rawHeader) merge(src *rawHeader) {
+ if src == nil {
+ return
+ }
+
+ for k, v := range *src {
+ if dst.isSet(k) {
+ continue
+ }
+
+ dst[k] = v
+ }
+}
+
+// Get JOSE name of curve
+func curveName(crv elliptic.Curve) (string, error) {
+ switch crv {
+ case elliptic.P256():
+ return "P-256", nil
+ case elliptic.P384():
+ return "P-384", nil
+ case elliptic.P521():
+ return "P-521", nil
+ default:
+ return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
+ }
+}
+
+// Get size of curve in bytes
+func curveSize(crv elliptic.Curve) int {
+ bits := crv.Params().BitSize
+
+ div := bits / 8
+ mod := bits % 8
+
+ if mod == 0 {
+ return div
+ }
+
+ return div + 1
+}
+
+func makeRawMessage(b []byte) *json.RawMessage {
+ rm := json.RawMessage(b)
+ return &rm
+}
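
The HeaderKey constants and the read-only Header type above surface in practice through SignerOptions and the parsed Signatures. A small sketch with an HMAC key; the secret and the "typ"/"cty" values are illustrative:

package main

import (
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // illustrative 32-byte HMAC secret

	// WithType sets the "typ" protected header; WithHeader adds "cty".
	opts := (&jose.SignerOptions{}).
		WithType("JOSE").
		WithHeader(jose.HeaderContentType, "text/plain")

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, opts)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("hello"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	if _, err := parsed.Verify(key); err != nil {
		panic(err)
	}

	// "typ" and "cty" are not named fields on Header, so they land in ExtraHeaders.
	hdr := parsed.Signatures[0].Protected
	fmt.Println(hdr.Algorithm, hdr.ExtraHeaders[jose.HeaderType], hdr.ExtraHeaders[jose.HeaderContentType])
}
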
diff --git a/vendor/gopkg.in/square/go-jose.v2/signing.go b/vendor/gopkg.in/square/go-jose.v2/signing.go
new file mode 100644
index 000000000..be6cf0481
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/signing.go
@@ -0,0 +1,389 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ "golang.org/x/crypto/ed25519"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// NonceSource represents a source of random nonces to go into JWS objects
+type NonceSource interface {
+ Nonce() (string, error)
+}
+
+// Signer represents a signer which takes a payload and produces a signed JWS object.
+type Signer interface {
+ Sign(payload []byte) (*JSONWebSignature, error)
+ Options() SignerOptions
+}
+
+// SigningKey represents an algorithm/key used to sign a message.
+type SigningKey struct {
+ Algorithm SignatureAlgorithm
+ Key interface{}
+}
+
+// SignerOptions represents options that can be set when creating signers.
+type SignerOptions struct {
+ NonceSource NonceSource
+ EmbedJWK bool
+
+ // Optional map of additional keys to be inserted into the protected header
+ // of a JWS object. Some specifications which make use of JWS like to insert
+ // additional values here. All values must be JSON-serializable.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
+ if so.ExtraHeaders == nil {
+ so.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ so.ExtraHeaders[k] = v
+ return so
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// SignerOptions.
+func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
+ return so.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated SignerOptions.
+func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
+ return so.WithHeader(HeaderType, typ)
+}
+
+type payloadSigner interface {
+ signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
+}
+
+type payloadVerifier interface {
+ verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
+}
+
+type genericSigner struct {
+ recipients []recipientSigInfo
+ nonceSource NonceSource
+ embedJWK bool
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientSigInfo struct {
+ sigAlg SignatureAlgorithm
+ publicKey func() *JSONWebKey
+ signer payloadSigner
+}
+
+func staticPublicKey(jwk *JSONWebKey) func() *JSONWebKey {
+ return func() *JSONWebKey {
+ return jwk
+ }
+}
+
+// NewSigner creates an appropriate signer based on the key type
+func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
+ return NewMultiSigner([]SigningKey{sig}, opts)
+}
+
+// NewMultiSigner creates a signer for multiple recipients
+func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
+ signer := &genericSigner{recipients: []recipientSigInfo{}}
+
+ if opts != nil {
+ signer.nonceSource = opts.NonceSource
+ signer.embedJWK = opts.EmbedJWK
+ signer.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, sig := range sigs {
+ err := signer.addRecipient(sig.Algorithm, sig.Key)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return signer, nil
+}
+
+// newVerifier creates a verifier based on the key type
+func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
+ switch verificationKey := verificationKey.(type) {
+ case ed25519.PublicKey:
+ return &edEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case *rsa.PublicKey:
+ return &rsaEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case *ecdsa.PublicKey:
+ return &ecEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case []byte:
+ return &symmetricMac{
+ key: verificationKey,
+ }, nil
+ case JSONWebKey:
+ return newVerifier(verificationKey.Key)
+ case *JSONWebKey:
+ return newVerifier(verificationKey.Key)
+ }
+ if ov, ok := verificationKey.(OpaqueVerifier); ok {
+ return &opaqueVerifier{verifier: ov}, nil
+ }
+ return nil, ErrUnsupportedKeyType
+}
+
+func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
+ recipient, err := makeJWSRecipient(alg, signingKey)
+ if err != nil {
+ return err
+ }
+
+ ctx.recipients = append(ctx.recipients, recipient)
+ return nil
+}
+
+func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
+ switch signingKey := signingKey.(type) {
+ case ed25519.PrivateKey:
+ return newEd25519Signer(alg, signingKey)
+ case *rsa.PrivateKey:
+ return newRSASigner(alg, signingKey)
+ case *ecdsa.PrivateKey:
+ return newECDSASigner(alg, signingKey)
+ case []byte:
+ return newSymmetricSigner(alg, signingKey)
+ case JSONWebKey:
+ return newJWKSigner(alg, signingKey)
+ case *JSONWebKey:
+ return newJWKSigner(alg, *signingKey)
+ }
+ if signer, ok := signingKey.(OpaqueSigner); ok {
+ return newOpaqueSigner(alg, signer)
+ }
+ return recipientSigInfo{}, ErrUnsupportedKeyType
+}
+
+func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
+ recipient, err := makeJWSRecipient(alg, signingKey.Key)
+ if err != nil {
+ return recipientSigInfo{}, err
+ }
+ if recipient.publicKey != nil && recipient.publicKey() != nil {
+ // recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
+ // was created for the inner key (such as a RSA or ECDSA public key). It contains
+ // the pub key for embedding, but doesn't have extra params like key id.
+ publicKey := signingKey
+ publicKey.Key = recipient.publicKey().Key
+ recipient.publicKey = staticPublicKey(&publicKey)
+
+ // This should be impossible, but let's check anyway.
+ if !recipient.publicKey().IsPublic() {
+ return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public")
+ }
+ }
+ return recipient, nil
+}
+
+func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
+ obj := &JSONWebSignature{}
+ obj.payload = payload
+ obj.Signatures = make([]Signature, len(ctx.recipients))
+
+ for i, recipient := range ctx.recipients {
+ protected := map[HeaderKey]interface{}{
+ headerAlgorithm: string(recipient.sigAlg),
+ }
+
+ if recipient.publicKey != nil && recipient.publicKey() != nil {
+ // We want to embed the JWK or set the kid header, but not both. Having a protected
+ // header that contains an embedded JWK while also simultaneously containing the kid
+ // header is confusing, and at least in ACME the two are considered to be mutually
+ // exclusive. The fact that both can exist at the same time is a somewhat unfortunate
+ // result of the JOSE spec. We've decided that this library will only include one or
+ // the other to avoid this confusion.
+ //
+ // See https://github.com/square/go-jose/issues/157 for more context.
+ if ctx.embedJWK {
+ protected[headerJWK] = recipient.publicKey()
+ } else {
+ protected[headerKeyID] = recipient.publicKey().KeyID
+ }
+ }
+
+ if ctx.nonceSource != nil {
+ nonce, err := ctx.nonceSource.Nonce()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
+ }
+ protected[headerNonce] = nonce
+ }
+
+ for k, v := range ctx.extraHeaders {
+ protected[k] = v
+ }
+
+ serializedProtected := mustSerializeJSON(protected)
+
+ input := []byte(fmt.Sprintf("%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(payload)))
+
+ signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ signatureInfo.protected = &rawHeader{}
+ for k, v := range protected {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err)
+ }
+ (*signatureInfo.protected)[k] = makeRawMessage(b)
+ }
+ obj.Signatures[i] = signatureInfo
+ }
+
+ return obj, nil
+}
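+
+// Hedged usage note (not upstream documentation): the embedJWK/kid choice made
+// in Sign above is driven by SignerOptions. For example, a signer built with
+//
+//     NewSigner(SigningKey{Algorithm: ES256, Key: ecdsaPrivateKey}, // placeholder key
+//         &SignerOptions{EmbedJWK: true})
+//
+// embeds the public JWK in the protected "jwk" header, while the default
+// (EmbedJWK false) sets the "kid" header from the key's KeyID instead; the two
+// are never emitted together, as explained in the comment inside Sign.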
+
+func (ctx *genericSigner) Options() SignerOptions {
+ return SignerOptions{
+ NonceSource: ctx.nonceSource,
+ EmbedJWK: ctx.embedJWK,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Verify validates the signature on the object and returns the payload.
+// This function does not support multi-signature; if you need multi-signature
+// verification, use VerifyMulti instead.
+//
+// Be careful when verifying signatures based on embedded JWKs inside the
+// payload header. You cannot assume that the key received in a payload is
+// trusted.
+func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
+ err := obj.DetachedVerify(obj.payload, verificationKey)
+ if err != nil {
+ return nil, err
+ }
+ return obj.payload, nil
+}
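+
+// exampleParseAndVerify is an illustrative sketch only (not part of the
+// upstream go-jose sources). It assumes a compact JWS such as the one produced
+// by exampleSignCompact above; ParseSigned is defined elsewhere in this package.
+func exampleParseAndVerify(serialized string, key []byte) ([]byte, error) {
+    obj, err := ParseSigned(serialized)
+    if err != nil {
+        return nil, err
+    }
+    // Verify checks the single signature against the supplied key and returns
+    // the payload only on success.
+    return obj.Verify(key)
+}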
+
+// UnsafePayloadWithoutVerification returns the payload without
+// verifying it. The content returned from this function cannot be
+// trusted.
+func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
+ return obj.payload
+}
+
+// DetachedVerify validates a detached signature on the given payload. In
+// most cases, you will probably want to use Verify instead. DetachedVerify
+// is only useful if you have a payload and signature that are separated from
+// each other.
+func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return err
+ }
+
+ if len(obj.Signatures) > 1 {
+ return errors.New("square/go-jose: too many signatures in payload; expecting only one")
+ }
+
+ signature := obj.Signatures[0]
+ headers := signature.mergedHeaders()
+ critical, err := headers.getCritical()
+ if err != nil {
+ return err
+ }
+ if len(critical) > 0 {
+ // Unsupported crit header
+ return ErrCryptoFailure
+ }
+
+ input := obj.computeAuthData(payload, &signature)
+ alg := headers.getSignatureAlgorithm()
+ err = verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return nil
+ }
+
+ return ErrCryptoFailure
+}
+
+// VerifyMulti validates (one of the multiple) signatures on the object and
+// returns the index of the signature that was verified, along with the signature
+// object and the payload. We return the signature and index to guarantee that
+// callers are getting the verified value.
+func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
+ idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
+ if err != nil {
+ return -1, Signature{}, nil, err
+ }
+ return idx, sig, obj.payload, nil
+}
+
+// DetachedVerifyMulti validates a detached signature on the given payload with
+// a signature/object that has potentially multiple signers. This returns the index
+// of the signature that was verified, along with the signature object. We return
+// the signature and index to guarantee that callers are getting the verified value.
+//
+// In most cases, you will probably want to use Verify or VerifyMulti instead.
+// DetachedVerifyMulti is only useful if you have a payload and signature that are
+// separated from each other, and the signature can have multiple signers at the
+// same time.
+func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return -1, Signature{}, err
+ }
+
+ for i, signature := range obj.Signatures {
+ headers := signature.mergedHeaders()
+ critical, err := headers.getCritical()
+ if err != nil {
+ continue
+ }
+ if len(critical) > 0 {
+ // Unsupported crit header
+ continue
+ }
+
+ input := obj.computeAuthData(payload, &signature)
+ alg := headers.getSignatureAlgorithm()
+ err = verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return i, signature, nil
+ }
+ }
+
+ return -1, Signature{}, ErrCryptoFailure
+}
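+
+// Hedged usage sketch (not upstream documentation): a multi-recipient object
+// can be produced with NewMultiSigner and checked with VerifyMulti, e.g.
+//
+//     signer, _ := NewMultiSigner([]SigningKey{
+//         {Algorithm: HS256, Key: keyA}, // keyA, keyB are placeholder secrets
+//         {Algorithm: HS256, Key: keyB},
+//     }, nil)
+//     obj, _ := signer.Sign(payload)
+//     idx, sig, out, err := obj.VerifyMulti(keyB)
+//
+// On success, idx reports which Signature entry matched, so callers act on the
+// verified signature rather than on an unverified one.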
diff --git a/vendor/gopkg.in/square/go-jose.v2/symmetric.go b/vendor/gopkg.in/square/go-jose.v2/symmetric.go
new file mode 100644
index 000000000..264a0fe37
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/symmetric.go
@@ -0,0 +1,482 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+
+ "golang.org/x/crypto/pbkdf2"
+ "gopkg.in/square/go-jose.v2/cipher"
+)
+
+// RandReader is the source of randomness used by this package (replaced with a
+// stub in tests).
+var RandReader = rand.Reader
+
+const (
+ // RFC7518 recommends a minimum of 1,000 iterations:
+ // https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+ // NIST recommends a minimum of 10,000:
+ // https://pages.nist.gov/800-63-3/sp800-63b.html
+ // 1Password uses 100,000:
+ // https://support.1password.com/pbkdf2/
+ defaultP2C = 100000
+ // Default salt size: 128 bits
+ defaultP2SSize = 16
+)
+
+// Key cipher for the shared symmetric key modes (direct, AES key wrap,
+// AES-GCM key wrap, and PBES2)
+type symmetricKeyCipher struct {
+ key []byte // Pre-shared content-encryption key
+ p2c int // PBES2 Count
+ p2s []byte // PBES2 Salt Input
+}
+
+// Signer/verifier for MAC modes
+type symmetricMac struct {
+ key []byte
+}
+
+// Input/output from an AEAD operation
+type aeadParts struct {
+ iv, ciphertext, tag []byte
+}
+
+// A content cipher based on an AEAD construction
+type aeadContentCipher struct {
+ keyBytes int
+ authtagBytes int
+ getAead func(key []byte) (cipher.AEAD, error)
+}
+
+// Random key generator
+type randomKeyGenerator struct {
+ size int
+}
+
+// Static key generator
+type staticKeyGenerator struct {
+ key []byte
+}
+
+// Create a new content cipher based on AES-GCM
+func newAESGCM(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize,
+ authtagBytes: 16,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ aes, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return cipher.NewGCM(aes)
+ },
+ }
+}
+
+// Create a new content cipher based on AES-CBC+HMAC
+func newAESCBC(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize * 2,
+ authtagBytes: keySize,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ return josecipher.NewCBCHMAC(key, aes.NewCipher)
+ },
+ }
+}
+
+// Get an AEAD cipher object for the given content encryption algorithm
+func getContentCipher(alg ContentEncryption) contentCipher {
+ switch alg {
+ case A128GCM:
+ return newAESGCM(16)
+ case A192GCM:
+ return newAESGCM(24)
+ case A256GCM:
+ return newAESGCM(32)
+ case A128CBC_HS256:
+ return newAESCBC(16)
+ case A192CBC_HS384:
+ return newAESCBC(24)
+ case A256CBC_HS512:
+ return newAESCBC(32)
+ default:
+ return nil
+ }
+}
+
+// getPbkdf2Params returns the key length and hash function used in
+// pbkdf2.Key.
+func getPbkdf2Params(alg KeyAlgorithm) (int, func() hash.Hash) {
+ switch alg {
+ case PBES2_HS256_A128KW:
+ return 16, sha256.New
+ case PBES2_HS384_A192KW:
+ return 24, sha512.New384
+ case PBES2_HS512_A256KW:
+ return 32, sha512.New
+ default:
+ panic("invalid algorithm")
+ }
+}
+
+// getRandomSalt generates a new salt of the given size.
+func getRandomSalt(size int) ([]byte, error) {
+ salt := make([]byte, size)
+ _, err := io.ReadFull(RandReader, salt)
+ if err != nil {
+ return nil, err
+ }
+
+ return salt, nil
+}
+
+// newSymmetricRecipient creates a recipientKeyInfo for the given symmetric key algorithm.
+func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
+ switch keyAlg {
+ case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &symmetricKeyCipher{
+ key: key,
+ },
+ }, nil
+}
+
+// newSymmetricSigner creates a recipientSigInfo based on the given key.
+func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case HS256, HS384, HS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ signer: &symmetricMac{
+ key: key,
+ },
+ }, nil
+}
+
+// Generate a random key for the given content cipher
+func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ key := make([]byte, ctx.size)
+ _, err := io.ReadFull(RandReader, key)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ return key, rawHeader{}, nil
+}
+
+// Key size for random generator
+func (ctx randomKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Generate a static key (for direct mode)
+func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, rawHeader{}, nil
+}
+
+// Key size for static generator
+func (ctx staticKeyGenerator) keySize() int {
+ return len(ctx.key)
+}
+
+// Get key size for this cipher
+func (ctx aeadContentCipher) keySize() int {
+ return ctx.keyBytes
+}
+
+// Encrypt some data
+func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
+ // Get a new AEAD instance
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize a new nonce
+ iv := make([]byte, aead.NonceSize())
+ _, err = io.ReadFull(RandReader, iv)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
+ offset := len(ciphertextAndTag) - ctx.authtagBytes
+
+ return &aeadParts{
+ iv: iv,
+ ciphertext: ciphertextAndTag[:offset],
+ tag: ciphertextAndTag[offset:],
+ }, nil
+}
+
+// Decrypt some data
+func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(parts.iv) != aead.NonceSize() || len(parts.tag) < ctx.authtagBytes {
+ return nil, ErrCryptoFailure
+ }
+
+ return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
+}
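+
+// Hedged illustration (not upstream documentation) of the content-cipher round
+// trip implemented above, using placeholder inputs:
+//
+//     cc := getContentCipher(A256GCM)               // expects a 32-byte key
+//     parts, err := cc.encrypt(key, aad, plaintext)
+//     // parts carries iv, ciphertext, and tag separately, as JWE requires
+//     recovered, err := cc.decrypt(key, aad, parts)
+//
+// where key, aad, and plaintext are caller-supplied byte slices.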
+
+// Encrypt the content encryption key.
+func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case DIRECT:
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ parts, err := aead.encrypt(ctx.key, []byte{}, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ header := &rawHeader{}
+ header.set(headerIV, newBuffer(parts.iv))
+ header.set(headerTag, newBuffer(parts.tag))
+
+ return recipientInfo{
+ header: header,
+ encryptedKey: parts.ciphertext,
+ }, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &rawHeader{},
+ }, nil
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if len(ctx.p2s) == 0 {
+ salt, err := getRandomSalt(defaultP2SSize)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+ ctx.p2s = salt
+ }
+
+ if ctx.p2c <= 0 {
+ ctx.p2c = defaultP2C
+ }
+
+ // salt is UTF8(Alg) || 0x00 || Salt Input
+ salt := bytes.Join([][]byte{[]byte(alg), ctx.p2s}, []byte{0x00})
+
+ // derive key
+ keyLen, h := getPbkdf2Params(alg)
+ key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
+
+ // use AES cipher with derived key
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ header := &rawHeader{}
+ header.set(headerP2C, ctx.p2c)
+ header.set(headerP2S, newBuffer(ctx.p2s))
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: header,
+ }, nil
+ }
+
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+}
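+
+// Worked example (informative only) of the PBES2 branch above: for
+// PBES2-HS256+A128KW, getPbkdf2Params returns (16, sha256.New), so with a
+// 16-byte salt input p2s and p2c = 100000 the wrapping key is
+//
+//     salt := append(append([]byte("PBES2-HS256+A128KW"), 0x00), p2s...)
+//     kek  := pbkdf2.Key(password, salt, 100000, 16, sha256.New)
+//
+// and the CEK is then wrapped with AES-128 key wrap, exactly as in the
+// A128KW case.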
+
+// Decrypt the content encryption key.
+func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ switch headers.getAlgorithm() {
+ case DIRECT:
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ iv, err := headers.getIV()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err)
+ }
+ tag, err := headers.getTag()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err)
+ }
+
+ parts := &aeadParts{
+ iv: iv.bytes(),
+ ciphertext: recipient.encryptedKey,
+ tag: tag.bytes(),
+ }
+
+ cek, err := aead.decrypt(ctx.key, []byte{}, parts)
+ if err != nil {
+ return nil, err
+ }
+
+ return cek, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return nil, err
+ }
+
+ cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return cek, nil
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ p2s, err := headers.getP2S()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid P2S: %v", err)
+ }
+ if p2s == nil || len(p2s.data) == 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid P2S: must be present")
+ }
+
+ p2c, err := headers.getP2C()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid P2C: %v", err)
+ }
+ if p2c <= 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid P2C: must be a positive integer")
+ }
+
+ // salt is UTF8(Alg) || 0x00 || Salt Input
+ alg := headers.getAlgorithm()
+ salt := bytes.Join([][]byte{[]byte(alg), p2s.bytes()}, []byte{0x00})
+
+ // derive key
+ keyLen, h := getPbkdf2Params(alg)
+ key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
+
+ // use AES cipher with derived key
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return cek, nil
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ mac, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return Signature{}, errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ return Signature{
+ Signature: mac,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
+ expected, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ if len(mac) != len(expected) {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ match := subtle.ConstantTimeCompare(mac, expected)
+ if match != 1 {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ return nil
+}
+
+// Compute the HMAC based on the given alg value
+func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
+ var hash func() hash.Hash
+
+ switch alg {
+ case HS256:
+ hash = sha256.New
+ case HS384:
+ hash = sha512.New384
+ case HS512:
+ hash = sha512.New
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ hmac := hmac.New(hash, ctx.key)
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hmac.Write(payload)
+ return hmac.Sum(nil), nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e6360aa4f..d295c700d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -78,7 +78,7 @@ github.com/containers/buildah/util
# github.com/containers/common v0.0.3
github.com/containers/common/pkg/cgroups
github.com/containers/common/pkg/unshare
-# github.com/containers/image/v5 v5.0.0
+# github.com/containers/image/v5 v5.1.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -118,6 +118,16 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
github.com/containers/libtrust
+# github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741
+github.com/containers/ocicrypt
+github.com/containers/ocicrypt/blockcipher
+github.com/containers/ocicrypt/config
+github.com/containers/ocicrypt/keywrap
+github.com/containers/ocicrypt/keywrap/jwe
+github.com/containers/ocicrypt/keywrap/pgp
+github.com/containers/ocicrypt/keywrap/pkcs7
+github.com/containers/ocicrypt/spec
+github.com/containers/ocicrypt/utils
# github.com/containers/psgo v1.4.0
github.com/containers/psgo
github.com/containers/psgo/internal/capabilities
@@ -255,6 +265,8 @@ github.com/fatih/camelcase
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.6.0
github.com/fsouza/go-dockerclient
+# github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
+github.com/fullsailor/pkcs7
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e
@@ -288,7 +300,7 @@ github.com/hpcloud/tail/ratelimiter
github.com/hpcloud/tail/util
github.com/hpcloud/tail/watch
github.com/hpcloud/tail/winfile
-# github.com/imdario/mergo v0.3.7
+# github.com/imdario/mergo v0.3.8
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
@@ -307,8 +319,6 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-isatty v0.0.8
-github.com/mattn/go-isatty
# github.com/mattn/go-shellwords v1.0.6
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
@@ -485,11 +495,11 @@ github.com/varlink/go/varlink/idl
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb v3.4.0+incompatible
-github.com/vbauerster/mpb
-github.com/vbauerster/mpb/cwriter
-github.com/vbauerster/mpb/decor
-github.com/vbauerster/mpb/internal
+# github.com/vbauerster/mpb/v4 v4.11.1
+github.com/vbauerster/mpb/v4
+github.com/vbauerster/mpb/v4/cwriter
+github.com/vbauerster/mpb/v4/decor
+github.com/vbauerster/mpb/v4/internal
# github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
@@ -509,14 +519,17 @@ go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.4.0
go.uber.org/atomic
-# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
+# golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
golang.org/x/crypto/cast5
+golang.org/x/crypto/ed25519
+golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
golang.org/x/crypto/openpgp/elgamal
golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
+golang.org/x/crypto/pbkdf2
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20190628185345-da137c7871d7
golang.org/x/net/context
@@ -582,6 +595,10 @@ google.golang.org/grpc/status
gopkg.in/fsnotify.v1
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
+# gopkg.in/square/go-jose.v2 v2.3.1
+gopkg.in/square/go-jose.v2
+gopkg.in/square/go-jose.v2/cipher
+gopkg.in/square/go-jose.v2/json
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.2.7