author    OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com>  2019-12-20 17:39:45 +0100
committer GitHub <noreply@github.com>  2019-12-20 17:39:45 +0100
commit    fcd48db4d24f6dba4fb2652d72aa0d86e167aa0c (patch)
tree      a0aa3c847b8a5bb1ea47bf9895136998b95a9519 /vendor/gopkg.in
parent    e33d7e9fab9974c8c13868e7434f78feab7508af (diff)
parent    50ece79387dcf6c748e3ae1bd6a7067059c0dfe3 (diff)
Merge pull request #4708 from containers/dependabot/go_modules/github.com/containers/image/v5-5.1.0
build(deps): bump github.com/containers/image/v5 from 5.0.0 to 5.1.0
Diffstat (limited to 'vendor/gopkg.in')
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc  1
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.gitignore  7
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/.travis.yml  46
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md  10
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md  14
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/LICENSE  202
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/README.md  118
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/asymmetric.go  592
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go  196
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go  75
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go  62
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go  109
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/crypter.go  535
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/doc.go  27
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/encoding.go  179
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/LICENSE  27
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/README.md  13
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/decode.go  1183
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/encode.go  1197
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/indent.go  141
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/scanner.go  623
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/stream.go  480
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/json/tags.go  44
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jwe.go  294
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jwk.go  608
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/jws.go  321
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/opaque.go  83
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/shared.go  499
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/signing.go  389
-rw-r--r--  vendor/gopkg.in/square/go-jose.v2/symmetric.go  482
30 files changed, 8557 insertions, 0 deletions
diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc
new file mode 100644
index 000000000..730e569b0
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc
@@ -0,0 +1 @@
+'|&{tU|gG(Cy=+c:u:/p#~["4!nADK<ufha:B/ؤ_hST*wx-|Ӄ㣗A$$6G)8npˡ3̚ovB3]xݓ2lG|qRޯ 2 5R$Yݙl˫yAI"یûk|K[9=|@S3 #x?V,SwPog6&V6 D.dB 7 \ No newline at end of file
diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore
new file mode 100644
index 000000000..5b4d73b68
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.gitignore
@@ -0,0 +1,7 @@
+*~
+.*.swp
+*.out
+*.test
+*.pem
+*.cov
+jose-util/jose-util
diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
new file mode 100644
index 000000000..fc501ca9b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml
@@ -0,0 +1,46 @@
+language: go
+
+sudo: false
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+- '1.7.x'
+- '1.8.x'
+- '1.9.x'
+- '1.10.x'
+- '1.11.x'
+
+go_import_path: gopkg.in/square/go-jose.v2
+
+before_script:
+- export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+# Install encrypted gitcookies to get around bandwidth-limits
+# that are causing Travis-CI builds to fail. For more info, see
+# https://github.com/golang/go/issues/12933
+- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
+- bash .gitcookies.sh || true
+- go get github.com/wadey/gocovmerge
+- go get github.com/mattn/goveralls
+- go get github.com/stretchr/testify/assert
+- go get golang.org/x/tools/cmd/cover || true
+- go get code.google.com/p/go.tools/cmd/cover || true
+- pip install cram --user
+
+script:
+- go test . -v -covermode=count -coverprofile=profile.cov
+- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
+- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov
+- go test ./json -v # no coverage for forked encoding/json package
+- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t
+- cd ..
+
+after_success:
+- gocovmerge *.cov */*.cov > merged.coverprofile
+- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci
+
diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md
new file mode 100644
index 000000000..3305db0f6
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md
@@ -0,0 +1,10 @@
+Serious about security
+======================
+
+Square recognizes the important contributions the security research community
+can make. We therefore encourage reporting security issues with the code
+contained in this repository.
+
+If you believe you have discovered a security vulnerability, please follow the
+guidelines at <https://bugcrowd.com/squareopensource>.
+
diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md
new file mode 100644
index 000000000..61b183651
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+[Individual Contributor License Agreement][1].
+
+ [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md
new file mode 100644
index 000000000..1791bfa8f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/README.md
@@ -0,0 +1,118 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
+[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE)
+[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose)
+[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and full serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+We use [gopkg.in](https://gopkg.in) for versioning.
+
+[Version 2](https://gopkg.in/square/go-jose.v2)
+([branch](https://github.com/square/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version:
+
+ import "gopkg.in/square/go-jose.v2"
+
+The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will
+still receive backported bug fixes and security fixes, but otherwise
+development is frozen. All new feature development takes place on the `v2`
+branch. Version 2 also contains additional sub-packages such as the
+[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation
+contributed by [@shaxbee](https://github.com/shaxbee).
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES<sup>1</sup>
+ Direct encryption | dir<sup>1</sup>
+
+<sup>1. Not supported in multi-recipient mode</sup>
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA<sup>2</sup>
+
+<sup>2. Only available in version 2 of the package</sup>
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+<sup>1. Only available in version 2 of the package</sup>
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example.
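The README above documents the supported algorithms and key types. Below is a minimal sketch (illustrative, not part of the vendored files) of signing and verifying a payload with the v2 API the README points to; the RSA key generation and payload are placeholders.

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        jose "gopkg.in/square/go-jose.v2"
    )

    func main() {
        // Illustrative key; a real deployment would load an existing key.
        priv, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }

        // RS256 is RSASSA-PKCS#1v1.5 with SHA-256 (see the signing table above).
        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: priv}, nil)
        if err != nil {
            panic(err)
        }

        jws, err := signer.Sign([]byte("hello JOSE"))
        if err != nil {
            panic(err)
        }

        compact, err := jws.CompactSerialize()
        if err != nil {
            panic(err)
        }

        parsed, err := jose.ParseSigned(compact)
        if err != nil {
            panic(err)
        }
        payload, err := parsed.Verify(&priv.PublicKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(payload)) // prints "hello JOSE"
    }
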
diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go
new file mode 100644
index 000000000..67935561b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "golang.org/x/crypto/ed25519"
+ "gopkg.in/square/go-jose.v2/cipher"
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("square/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("square/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("square/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("square/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("square/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("square/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
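A note on the ECDSA serialization in signPayload above: JWS signatures carry the fixed-width big-endian concatenation r||s (each value left-padded with zeros to the coordinate size), not the ASN.1 DER form. A hypothetical equivalent of that padding step for ES256, sketched with big.Int.FillBytes instead of the manual copy used in the vendored code:

    package example

    import "math/big"

    // encodeES256Signature is a hypothetical helper equivalent to the manual
    // padding in signPayload for ES256 (P-256, so keyBytes = 32): the JWS
    // signature is r||s with each value zero-padded on the left to 32 bytes.
    func encodeES256Signature(r, s *big.Int) []byte {
        out := make([]byte, 64)
        r.FillBytes(out[:32]) // FillBytes zero-pads on the left (Go 1.15+)
        s.FillBytes(out[32:])
        return out
    }
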
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
new file mode 100644
index 000000000..126b85ce2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("square/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
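A minimal usage sketch (not part of the vendored files) for the AEAD above, assuming a 32-byte combined key as used for A128CBC-HS256: NewCBCHMAC splits it into a 16-byte HMAC-SHA256 key and a 16-byte AES-128 key.

    package main

    import (
        "crypto/aes"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        key := make([]byte, 32) // first half: integrity (HMAC) key, second half: encryption key
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }

        aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
        if err != nil {
            panic(err)
        }

        nonce := make([]byte, aead.NonceSize()) // 16-byte CBC IV
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }

        aad := []byte("protected header")
        ciphertext := aead.Seal(nil, nonce, []byte("secret payload"), aad)

        plaintext, err := aead.Open(nil, nonce, ciphertext, aad)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(plaintext)) // prints "secret payload"
    }
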
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
new file mode 100644
index 000000000..f62c3bdba
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
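A small usage sketch (not part of the vendored files) for the KDF reader above. The inputs here are placeholders; in the library it is DeriveECDHES, in the next file, that assembles the real length-prefixed algorithm and party info.

    package main

    import (
        "crypto"
        _ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() is available
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        // Placeholder inputs for illustration only.
        sharedSecret := []byte("example shared secret Z")
        algID := []byte{0, 0, 0, 7, 'A', '1', '2', '8', 'G', 'C', 'M'} // length-prefixed "A128GCM"

        kdf := josecipher.NewConcatKDF(crypto.SHA256, sharedSecret, algID, nil, nil, nil, nil)

        key := make([]byte, 16)
        _, _ = kdf.Read(key) // Read on the KDF never fails
        fmt.Printf("derived key: %x\n", key)
    }
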
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
new file mode 100644
index 000000000..c128e327f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go
@@ -0,0 +1,62 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key pair that is not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+ return key
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
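A quick sanity-check sketch (not part of the vendored files) for DeriveECDHES above: both parties derive the same key when each combines its own private key with the other's public key. The keys are generated here purely for illustration.

    package main

    import (
        "bytes"
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        // Two parties on the same curve (P-256).
        alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

        // Each side derives a 16-byte key for "A128GCM"; apu/apv are left empty.
        k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
        k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

        fmt.Println("keys match:", bytes.Equal(k1, k2)) // expected: true
    }
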
diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
new file mode 100644
index 000000000..1d36d5015
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("square/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
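A minimal round-trip sketch (not part of the vendored files) for KeyWrap/KeyUnwrap above, assuming an AES-128 key-encryption key (the A128KW case):

    package main

    import (
        "bytes"
        "crypto/aes"
        "crypto/rand"
        "fmt"

        josecipher "gopkg.in/square/go-jose.v2/cipher"
    )

    func main() {
        kek := make([]byte, 16) // AES-128 key-encryption key
        cek := make([]byte, 32) // content encryption key to wrap; must be a multiple of 8 bytes
        _, _ = rand.Read(kek)
        _, _ = rand.Read(cek)

        block, err := aes.NewCipher(kek)
        if err != nil {
            panic(err)
        }

        wrapped, err := josecipher.KeyWrap(block, cek)
        if err != nil {
            panic(err)
        }

        unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
        if err != nil {
            panic(err)
        }
        fmt.Println("round trip ok:", bytes.Equal(cek, unwrapped)) // expected: true
    }
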
diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go
new file mode 100644
index 000000000..c45c71206
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/crypter.go
@@ -0,0 +1,535 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+ // Optional map of additional keys to be inserted into the protected header
+ // of a JWS object. Some specifications which make use of JWS like to insert
+ // additional values here. All values must be JSON-serializable.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
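+
+// Illustrative sketch, not part of the upstream go-jose source: the fluent
+// helpers above can be chained to build options before constructing an
+// encrypter. The header values used here are placeholders.
+func exampleEncrypterOptions() *EncrypterOptions {
+	opts := &EncrypterOptions{Compression: DEFLATE}
+	// Each helper returns the same *EncrypterOptions, so calls can be chained.
+	return opts.WithType("JWE").WithContentType("application/json")
+}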
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided, a safe
+// default of 100000 will be used for the count, and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: rawKey.([]byte),
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ typeOf := reflect.TypeOf(rawKey)
+ if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: rawKey.(*ecdsa.PublicKey),
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
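+
+// Illustrative sketch, not part of the upstream go-jose source: a typical
+// single-recipient round trip using the exported API. Error handling is kept
+// minimal and the plaintext is a placeholder.
+func exampleEncrypt(publicKey *rsa.PublicKey) (string, error) {
+	enc, err := NewEncrypter(A128GCM, Recipient{Algorithm: RSA_OAEP, Key: publicKey}, nil)
+	if err != nil {
+		return "", err
+	}
+	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
+	if err != nil {
+		return "", err
+	}
+	// Compact serialization produces the five-part dotted JWE form.
+	return obj.CompactSerialize()
+}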
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+	if len(rcpts) == 0 {
+ return nil, fmt.Errorf("square/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
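+
+// Illustrative sketch, not part of the upstream go-jose source: encrypting to
+// two recipients at once. The full JSON serialization is used because the
+// compact form only supports a single recipient.
+func exampleMultiEncrypt(k1, k2 *rsa.PublicKey, plaintext []byte) (string, error) {
+	enc, err := NewMultiEncrypter(A128GCM, []Recipient{
+		{Algorithm: RSA_OAEP, Key: k1},
+		{Algorithm: RSA_OAEP, Key: k2},
+	}, nil)
+	if err != nil {
+		return "", err
+	}
+	obj, err := enc.Encrypt(plaintext)
+	if err != nil {
+		return "", err
+	}
+	return obj.FullSerialize(), nil
+}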
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+ }
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt with additional authenticated data, producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. Note that this
+// function does not support multi-recipient; if you need multi-recipient
+// decryption, use DecryptMulti instead.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ return plaintext, err
+}
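+
+// Illustrative sketch, not part of the upstream go-jose source: parsing a
+// serialized message with ParseEncrypted (defined elsewhere in this package)
+// and recovering the plaintext with the matching private key.
+func exampleDecrypt(serialized string, privateKey *rsa.PrivateKey) ([]byte, error) {
+	obj, err := ParseEncrypted(serialized)
+	if err != nil {
+		return nil, err
+	}
+	return obj.Decrypt(privateKey)
+}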
+
+// DecryptMulti decrypts and validates the object and returns the plaintext,
+// with support for multiple recipients. It returns the index of the recipient
+// for which decryption succeeded, the merged headers for that recipient,
+// and the plaintext.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil || err != nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
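+
+// Illustrative sketch, not part of the upstream go-jose source: for a
+// multi-recipient message, DecryptMulti also reports which recipient's key
+// unwrapped the content encryption key and that recipient's merged headers.
+func exampleDecryptMulti(serialized string, privateKey *rsa.PrivateKey) ([]byte, error) {
+	obj, err := ParseEncrypted(serialized)
+	if err != nil {
+		return nil, err
+	}
+	index, header, plaintext, err := obj.DecryptMulti(privateKey)
+	if err != nil {
+		return nil, err
+	}
+	_ = index  // position of the matching recipient
+	_ = header // sanitized headers for that recipient
+	return plaintext, nil
+}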
diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go
new file mode 100644
index 000000000..dd1387f3f
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/doc.go
@@ -0,0 +1,27 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON
+Web Token support available in a sub-package. The library supports both the
+compact and full serialization formats, and has optional support for multiple
+recipients.
+
+*/
+package jose
diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go
new file mode 100644
index 000000000..b9687c647
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go
@@ -0,0 +1,179 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "regexp"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+var stripWhitespaceRegex = regexp.MustCompile(`\s`)
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/square/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ return stripWhitespaceRegex.ReplaceAllString(data, "")
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Compress with DEFLATE
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+	// flate.NewWriter only fails for an invalid compression level, so err is always nil here
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// Decompress with DEFLATE
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ _, err := io.Copy(output, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
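+
+// Illustrative sketch, not part of the upstream go-jose source: deflate and
+// inflate are inverses, so a compress/decompress round trip with DEFLATE
+// returns the original input.
+func exampleDeflateRoundTrip(input []byte) ([]byte, error) {
+	compressed, err := compress(DEFLATE, input)
+	if err != nil {
+		return nil, err
+	}
+	return decompress(DEFLATE, compressed)
+}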
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
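+
+// Illustrative sketch, not part of the upstream go-jose source: byteBuffer is
+// the internal bridge between raw key material and the url-safe base64 strings
+// used in JOSE JSON structures.
+func exampleByteBuffer() string {
+	buf := newBufferFromInt(65537) // e.g. the common RSA public exponent
+	return buf.base64()            // yields "AQAB"
+}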
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md
new file mode 100644
index 000000000..86de5e558
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
new file mode 100644
index 000000000..37457e5a8
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/decode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag).
+// Unlike the standard library, this fork requires an exact, case-sensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
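+
+// Illustrative sketch, not part of the upstream source: unlike the standard
+// library, this fork matches struct fields case-sensitively and rejects
+// objects that contain duplicate keys.
+func exampleStrictUnmarshal() error {
+	var out struct {
+		Alg string `json:"alg"`
+	}
+	// "ALG" does not match the "alg" tag here, so out.Alg stays empty.
+	if err := Unmarshal([]byte(`{"ALG":"RS256"}`), &out); err != nil {
+		return err
+	}
+	// The duplicate "alg" key below causes Unmarshal to return an error.
+	return Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &out)
+}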
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ useNumber bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+		// Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go
new file mode 100644
index 000000000..1dae8bb7c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
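+
+// Illustrative sketch, not part of the upstream source: the struct tag options
+// documented above control which keys are emitted.
+func exampleMarshalTags() ([]byte, error) {
+	v := struct {
+		Name   string `json:"name"`
+		Secret string `json:"-"`
+		Note   string `json:"note,omitempty"`
+	}{Name: "example"}
+	// Yields {"name":"example"}: Secret is always skipped, Note is empty and omitted.
+	return Marshal(v)
+}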
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
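A short usage sketch for HTMLEscape, escaping the characters that are unsafe inside <script> tags (vendored import path assumed as above):

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	var buf bytes.Buffer
	json.HTMLEscape(&buf, []byte(`{"msg":"<b>hi</b> & bye"}`))
	fmt.Println(buf.String())
	// {"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
}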
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
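A type can take over its own encoding by implementing Marshaler; a minimal sketch with a hypothetical Cents type that renders itself as a formatted string (the returned bytes must be valid JSON, since Marshal re-validates and compacts them):

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

// Cents is a hypothetical type that marshals itself as a dollar string.
type Cents int64

func (c Cents) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"$%d.%02d"`, c/100, c%100)), nil
}

func main() {
	b, _ := json.Marshal(map[string]Cents{"price": 1999})
	fmt.Println(string(b)) // {"price":"$19.99"}
}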
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ valueEncoder(v)(e, v, false)
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // To deal with recursive types, populate the map with an
+ // indirect func before we build it. This type waits on the
+ // real func (f) to be ready and then calls it. This indirect
+ // func is only used for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
+ wg.Wait()
+ f(e, v, quoted)
+ }
+ encoderCache.Unlock()
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
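The placeholder-then-swap dance above is what lets self-referential types encode without typeEncoder deadlocking on its own cache entry. A small sketch with a hypothetical linked-list type:

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

// Node refers to its own type, so building its encoder recursively relies on
// the indirect placeholder that typeEncoder installs first.
type Node struct {
	Value int   `json:"value"`
	Next  *Node `json:"next,omitempty"`
}

func main() {
	b, _ := json.Marshal(&Node{Value: 1, Next: &Node{Value: 2}})
	fmt.Println(string(b)) // {"value":1,"next":{"value":2}}
}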
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Type() == numberType {
+ numStr := v.String()
+ // In Go 1.5 the empty string encodes to "0". That is not a valid number
+ // literal, but we keep compatibility with that behavior, so we check
+ // validity after substituting the zero value below.
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ if !isValidNumber(numStr) {
+ e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name)
+ e.WriteByte(':')
+ se.fieldEncs[i](e, fv, f.quoted)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+ var sv stringValues = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(k), false)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
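Because of encodeByteSlice, a []byte value is emitted as a base64 (StdEncoding) JSON string rather than as an array of numbers; a quick sketch, assuming stdlib-compatible behavior:

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	b, _ := json.Marshal(map[string][]byte{"data": []byte("hi")})
	fmt.Println(string(b)) // {"data":"aGk="}
}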
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), false)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), quoted)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, quoted)
+ } else {
+ ce.elseEnc(e, v, quoted)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, > and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, >, and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ return f
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Only strings, floats, integers, and booleans can be quoted.
+ quoted := false
+ if opts.Contains("string") {
+ switch ft.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ quoted = true
+ }
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: quoted,
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: this condition is an error in Go, so we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
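A sketch of these amended visibility rules: two embedded fields collide at the same depth, and the JSON-tagged one wins instead of both being dropped (all type names here are hypothetical; the output assumes stdlib-compatible behavior):

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

type A struct {
	X int // untagged
}

type B struct {
	X int `json:"X"` // tagged
}

type C struct {
	A
	B
}

func main() {
	b, _ := json.Marshal(C{A{X: 1}, B{X: 2}})
	fmt.Println(string(b)) // {"X":2}: the tagged B.X dominates the untagged A.X
}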
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/indent.go b/vendor/gopkg.in/square/go-jose.v2/json/indent.go
new file mode 100644
index 000000000..7cd9f4db1
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(&scan, c)
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
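A usage sketch for Compact, stripping insignificant whitespace from an already-encoded value (vendored import path assumed as above):

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	var buf bytes.Buffer
	src := []byte("{\n  \"a\": [1, 2, 3]\n}")
	if err := json.Compact(&buf, src); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"a":[1,2,3]}
}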
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, c)
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
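And the opposite direction with Indent; MarshalIndent earlier in this diff is just the convenience wrapper that calls Marshal and then applies the same formatting. A short sketch:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	var buf bytes.Buffer
	if err := json.Indent(&buf, []byte(`{"a":[1,2]}`), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// {
	//   "a": [
	//     1,
	//     2
	//   ]
	// }
}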
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/scanner.go b/vendor/gopkg.in/square/go-jose.v2/json/scanner.go
new file mode 100644
index 000000000..ee6622e8c
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/scanner.go
@@ -0,0 +1,623 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
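checkValid and the scanner are unexported, so the following sketch only makes sense inside this package (for example in a _test.go file, with "testing" imported); it simply drives the state machine over a couple of byte slices:

func TestCheckValidSketch(t *testing.T) {
	var scan scanner
	if err := checkValid([]byte(`{"a":[1,2,3]}`), &scan); err != nil {
		t.Fatalf("expected valid JSON, got %v", err)
	}
	// checkValid resets the scanner itself, so the same value can be reused.
	if err := checkValid([]byte(`[1}`), &scan); err == nil {
		t.Fatal("expected a syntax error for mismatched delimiters")
	}
}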
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, c)
+ if v >= scanEndObject {
+ switch v {
+ // probe the scanner with a space to determine whether we will
+ // get scanEnd on the next character. Otherwise, if the next character
+ // is not a space, scanEndTop allocates a needless error.
+ case scanEndObject, scanEndArray:
+ if scan.step(scan, ' ') == scanEnd {
+ return data[:i+1], data[i+1:], nil
+ }
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, byte) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*scanner, byte) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.step and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.step: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+ s.redo = false
+ s.step = s.redoState
+ return s.redoCode
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/stream.go b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
new file mode 100644
index 000000000..8ddcf4d27
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/stream.go
@@ -0,0 +1,480 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scanp int // start of unread data in buf
+ scan scanner
+ err error
+
+ tokenState int
+ tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ if err := dec.tokenPrepareForDecode(); err != nil {
+ return err
+ }
+
+ if !dec.tokenValueAllowed() {
+ return &SyntaxError{msg: "not at beginning of value"}
+ }
+
+ // Read whole value into buffer.
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+ dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+ dec.scanp += n
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ err = dec.d.unmarshal(v)
+
+ // fixup token streaming state
+ dec.tokenValueEnd()
+
+ return err
+}
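A sketch of reading a stream of concatenated JSON values with a Decoder; UseNumber keeps numbers as Number values instead of float64 (vendored import path assumed, behavior assumed stdlib-compatible):

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"id":1} {"id":2} {"id":3}`))
	dec.UseNumber()
	for {
		var v map[string]interface{}
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", v["id"], v["id"]) // json.Number 1, then 2, then 3
	}
}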
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := dec.scanp
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, c)
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ n := scanp - dec.scanp
+ err = dec.refill()
+ scanp = dec.scanp + n
+ }
+ return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if dec.scanp > 0 {
+ n := copy(dec.buf, dec.buf[dec.scanp:])
+ dec.buf = dec.buf[:n]
+ dec.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+ return err
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ if _, err = enc.w.Write(e.Bytes()); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
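The matching write side: each Encode call appends one value plus a trailing newline to the destination writer. A small sketch:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

func main() {
	var out bytes.Buffer
	enc := json.NewEncoder(&out)
	for _, v := range []interface{}{map[string]int{"id": 1}, map[string]int{"id": 2}} {
		if err := enc.Encode(v); err != nil {
			panic(err)
		}
	}
	fmt.Print(out.String())
	// {"id":1}
	// {"id":2}
}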
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
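RawMessage is handy for deferring part of a decode until the outer shape has been inspected; a hedged sketch with a hypothetical Envelope type (Unmarshal comes from this package's decode.go):

package main

import (
	"fmt"

	"gopkg.in/square/go-jose.v2/json"
)

// Envelope keeps Payload undecoded until Kind has been examined.
type Envelope struct {
	Kind    string          `json:"kind"`
	Payload json.RawMessage `json:"payload"`
}

func main() {
	data := []byte(`{"kind":"point","payload":{"x":1,"y":2}}`)
	var env Envelope
	if err := json.Unmarshal(data, &env); err != nil {
		panic(err)
	}
	if env.Kind == "point" {
		var pt struct{ X, Y int }
		if err := json.Unmarshal(env.Payload, &pt); err != nil {
			panic(err)
		}
		fmt.Println(pt.X, pt.Y) // 1 2
	}
}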
+
+// A Token holds a value of one of these types:
+//
+// Delim, for the four JSON delimiters [ ] { }
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+//
+type Token interface{}
+
+const (
+ tokenTopValue = iota
+ tokenArrayStart
+ tokenArrayValue
+ tokenArrayComma
+ tokenObjectStart
+ tokenObjectKey
+ tokenObjectColon
+ tokenObjectValue
+ tokenObjectComma
+)
+
+// tokenPrepareForDecode advances the token state from a separator state to a value state.
+func (dec *Decoder) tokenPrepareForDecode() error {
+ // Note: Not calling peek before switch, to avoid
+ // putting peek into the standard Decode path.
+ // peek is only called when using the Token API.
+ switch dec.tokenState {
+ case tokenArrayComma:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ',' {
+ return &SyntaxError{"expected comma after array element", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ case tokenObjectColon:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ':' {
+ return &SyntaxError{"expected colon after object key", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ }
+ return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+ switch dec.tokenState {
+ case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ return true
+ }
+ return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+ switch dec.tokenState {
+ case tokenArrayStart, tokenArrayValue:
+ dec.tokenState = tokenArrayComma
+ case tokenObjectValue:
+ dec.tokenState = tokenObjectComma
+ }
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+ return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+ for {
+ c, err := dec.peek()
+ if err != nil {
+ return nil, err
+ }
+ switch c {
+ case '[':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenArrayStart
+ return Delim('['), nil
+
+ case ']':
+ if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim(']'), nil
+
+ case '{':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenObjectStart
+ return Delim('{'), nil
+
+ case '}':
+ if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim('}'), nil
+
+ case ':':
+ if dec.tokenState != tokenObjectColon {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ continue
+
+ case ',':
+ if dec.tokenState == tokenArrayComma {
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ continue
+ }
+ if dec.tokenState == tokenObjectComma {
+ dec.scanp++
+ dec.tokenState = tokenObjectKey
+ continue
+ }
+ return dec.tokenError(c)
+
+ case '"':
+ if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+ var x string
+ old := dec.tokenState
+ dec.tokenState = tokenTopValue
+ err := dec.Decode(&x)
+ dec.tokenState = old
+ if err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ dec.tokenState = tokenObjectColon
+ return x, nil
+ }
+ fallthrough
+
+ default:
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ return x, nil
+ }
+ }
+}
+
+func clearOffset(err error) {
+ if s, ok := err.(*SyntaxError); ok {
+ s.Offset = 0
+ }
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+ var context string
+ switch dec.tokenState {
+ case tokenTopValue:
+ context = " looking for beginning of value"
+ case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ context = " looking for beginning of value"
+ case tokenArrayComma:
+ context = " after array element"
+ case tokenObjectKey:
+ context = " looking for beginning of object key string"
+ case tokenObjectColon:
+ context = " after object key"
+ case tokenObjectComma:
+ context = " after object key:value pair"
+ }
+ return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+ c, err := dec.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := dec.scanp; i < len(dec.buf); i++ {
+ c := dec.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ dec.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ return 0, err
+ }
+ err = dec.refill()
+ }
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error {
+ ...
+}
+
+*/
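The Decoder/Token API in this vendored json fork mirrors the standard library's streaming interface it was adapted from, so the same consumption loop applies to either package. A minimal sketch (shown against the standard library's encoding/json, which is an assumption about equivalence, not part of this diff):

package main

import (
	"encoding/json" // the vendored fork above mirrors this streaming API
	"fmt"
	"io"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"alg":"RS256","crit":["b64"]}`))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Delimiters come back as json.Delim; strings, numbers, bools and
		// nulls come back as their Go equivalents. Commas and colons are elided.
		fmt.Printf("%T: %v\n", tok, tok)
	}
}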
diff --git a/vendor/gopkg.in/square/go-jose.v2/json/tags.go b/vendor/gopkg.in/square/go-jose.v2/json/tags.go
new file mode 100644
index 000000000..c38fd5102
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether the comma-separated list of options contains the
+// given option name. The name must be bounded by the start or end of the
+// string, or by commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
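parseTag and Contains are unexported helpers, but their behaviour is easy to mirror in a standalone sketch: the part of a `json` struct tag before the first comma is the field name, and everything after it is a comma-separated option list. The snippet below is illustrative only, not part of the library.

package main

import (
	"fmt"
	"strings"
)

// splitTag mirrors parseTag/Contains above: name before the first comma,
// comma-separated options after it.
func splitTag(tag string) (name string, options []string) {
	parts := strings.Split(tag, ",")
	return parts[0], parts[1:]
}

func main() {
	name, opts := splitTag("encrypted_key,omitempty")
	fmt.Println(name, opts) // encrypted_key [omitempty]
}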
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwe.go b/vendor/gopkg.in/square/go-jose.v2/jwe.go
new file mode 100644
index 000000000..b5a6dcdf4
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwe.go
@@ -0,0 +1,294 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
+type rawJSONWebEncryption struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Unprotected *rawHeader `json:"unprotected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Recipients []rawRecipientInfo `json:"recipients,omitempty"`
+ Aad *byteBuffer `json:"aad,omitempty"`
+ EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
+ Iv *byteBuffer `json:"iv,omitempty"`
+ Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
+ Tag *byteBuffer `json:"tag,omitempty"`
+}
+
+// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
+type rawRecipientInfo struct {
+ Header *rawHeader `json:"header,omitempty"`
+ EncryptedKey string `json:"encrypted_key,omitempty"`
+}
+
+// JSONWebEncryption represents an encrypted JWE object after parsing.
+type JSONWebEncryption struct {
+ Header Header
+ protected, unprotected *rawHeader
+ recipients []recipientInfo
+ aad, iv, ciphertext, tag []byte
+ original *rawJSONWebEncryption
+}
+
+// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
+type recipientInfo struct {
+ header *rawHeader
+ encryptedKey []byte
+}
+
+// GetAuthData retrieves the (optional) authenticated data attached to the object.
+func (obj JSONWebEncryption) GetAuthData() []byte {
+ if obj.aad != nil {
+ out := make([]byte, len(obj.aad))
+ copy(out, obj.aad)
+ return out
+ }
+
+ return nil
+}
+
+// Get the merged header values
+func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
+ out := rawHeader{}
+ out.merge(obj.protected)
+ out.merge(obj.unprotected)
+
+ if recipient != nil {
+ out.merge(recipient.header)
+ }
+
+ return out
+}
+
+// Get the additional authenticated data from a JWE object.
+func (obj JSONWebEncryption) computeAuthData() []byte {
+ var protected string
+
+ if obj.original != nil && obj.original.Protected != nil {
+ protected = obj.original.Protected.base64()
+ } else if obj.protected != nil {
+		protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.protected))
+ } else {
+ protected = ""
+ }
+
+ output := []byte(protected)
+ if obj.aad != nil {
+ output = append(output, '.')
+ output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
+ }
+
+ return output
+}
+
+// ParseEncrypted parses an encrypted message in compact or full serialization format.
+func ParseEncrypted(input string) (*JSONWebEncryption, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseEncryptedFull(input)
+ }
+
+ return parseEncryptedCompact(input)
+}
+
+// parseEncryptedFull parses a message in full serialization format.
+func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
+ var parsed rawJSONWebEncryption
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWE object from the raw JSON.
+func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{
+ original: parsed,
+ unprotected: parsed.Unprotected,
+ }
+
+ // Check that there is not a nonce in the unprotected headers
+ if parsed.Unprotected != nil {
+ if nonce := parsed.Unprotected.getNonce(); nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+ }
+ if parsed.Header != nil {
+ if nonce := parsed.Header.getNonce(); nonce != "" {
+ return nil, ErrUnprotectedNonce
+ }
+ }
+
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
+ }
+ }
+
+ // Note: this must be called _after_ we parse the protected header,
+ // otherwise fields from the protected header will not get picked up.
+ var err error
+ mergedHeaders := obj.mergedHeaders(nil)
+ obj.Header, err = mergedHeaders.sanitized()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
+ }
+
+ if len(parsed.Recipients) == 0 {
+ obj.recipients = []recipientInfo{
+ {
+ header: parsed.Header,
+ encryptedKey: parsed.EncryptedKey.bytes(),
+ },
+ }
+ } else {
+ obj.recipients = make([]recipientInfo, len(parsed.Recipients))
+ for r := range parsed.Recipients {
+ encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ obj.recipients[r].header = parsed.Recipients[r].Header
+ obj.recipients[r].encryptedKey = encryptedKey
+ }
+ }
+
+ for _, recipient := range obj.recipients {
+ headers := obj.mergedHeaders(&recipient)
+ if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
+ return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
+ }
+ }
+
+ obj.iv = parsed.Iv.bytes()
+ obj.ciphertext = parsed.Ciphertext.bytes()
+ obj.tag = parsed.Tag.bytes()
+ obj.aad = parsed.Aad.bytes()
+
+ return obj, nil
+}
+
+// parseEncryptedCompact parses a message in compact format.
+func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 5 {
+ return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
+ }
+
+ rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ iv, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3])
+ if err != nil {
+ return nil, err
+ }
+
+ tag, err := base64.RawURLEncoding.DecodeString(parts[4])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJSONWebEncryption{
+ Protected: newBuffer(rawProtected),
+ EncryptedKey: newBuffer(encryptedKey),
+ Iv: newBuffer(iv),
+ Ciphertext: newBuffer(ciphertext),
+ Tag: newBuffer(tag),
+ }
+
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JSONWebEncryption) CompactSerialize() (string, error) {
+ if len(obj.recipients) != 1 || obj.unprotected != nil ||
+ obj.protected == nil || obj.recipients[0].header != nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s.%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
+ base64.RawURLEncoding.EncodeToString(obj.iv),
+ base64.RawURLEncoding.EncodeToString(obj.ciphertext),
+ base64.RawURLEncoding.EncodeToString(obj.tag)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JSONWebEncryption) FullSerialize() string {
+ raw := rawJSONWebEncryption{
+ Unprotected: obj.unprotected,
+ Iv: newBuffer(obj.iv),
+ Ciphertext: newBuffer(obj.ciphertext),
+ EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
+ Tag: newBuffer(obj.tag),
+ Aad: newBuffer(obj.aad),
+ Recipients: []rawRecipientInfo{},
+ }
+
+ if len(obj.recipients) > 1 {
+ for _, recipient := range obj.recipients {
+ info := rawRecipientInfo{
+ Header: recipient.header,
+ EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
+ }
+ raw.Recipients = append(raw.Recipients, info)
+ }
+ } else {
+ // Use flattened serialization
+ raw.Header = obj.recipients[0].header
+ raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
+ }
+
+ if obj.protected != nil {
+ raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
+ }
+
+ return string(mustSerializeJSON(raw))
+}
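A minimal sketch of the parsing entry point above: ParseEncrypted accepts either the five-part compact form or the full/flattened JSON form, and the result can be re-serialized (or decrypted via the Decrypt method defined in crypter.go). The serialized input below is a placeholder.

package main

import (
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// Placeholder: a real compact (five-part) or JSON-serialized JWE goes here.
	serialized := "<jwe-goes-here>"

	obj, err := jose.ParseEncrypted(serialized)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	// Merged protected/unprotected headers are exposed on obj.Header.
	fmt.Println("kid:", obj.Header.KeyID)

	// Re-serialize in the full JSON form regardless of the input form.
	fmt.Println(obj.FullSerialize())
}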
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwk.go b/vendor/gopkg.in/square/go-jose.v2/jwk.go
new file mode 100644
index 000000000..6cb8adb84
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwk.go
@@ -0,0 +1,608 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "golang.org/x/crypto/ed25519"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
+type rawJSONWebKey struct {
+ Use string `json:"use,omitempty"`
+ Kty string `json:"kty,omitempty"`
+ Kid string `json:"kid,omitempty"`
+ Crv string `json:"crv,omitempty"`
+ Alg string `json:"alg,omitempty"`
+ K *byteBuffer `json:"k,omitempty"`
+ X *byteBuffer `json:"x,omitempty"`
+ Y *byteBuffer `json:"y,omitempty"`
+ N *byteBuffer `json:"n,omitempty"`
+ E *byteBuffer `json:"e,omitempty"`
+ // -- Following fields are only used for private keys --
+ // RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
+ // completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
+ // we have a private key whereas D == nil means we have only a public key.
+ D *byteBuffer `json:"d,omitempty"`
+ P *byteBuffer `json:"p,omitempty"`
+ Q *byteBuffer `json:"q,omitempty"`
+ Dp *byteBuffer `json:"dp,omitempty"`
+ Dq *byteBuffer `json:"dq,omitempty"`
+ Qi *byteBuffer `json:"qi,omitempty"`
+ // Certificates
+ X5c []string `json:"x5c,omitempty"`
+}
+
+// JSONWebKey represents a public or private key in JWK format.
+type JSONWebKey struct {
+ Key interface{}
+ Certificates []*x509.Certificate
+ KeyID string
+ Algorithm string
+ Use string
+}
+
+// MarshalJSON serializes the given key to its JSON representation.
+func (k JSONWebKey) MarshalJSON() ([]byte, error) {
+ var raw *rawJSONWebKey
+ var err error
+
+ switch key := k.Key.(type) {
+ case ed25519.PublicKey:
+ raw = fromEdPublicKey(key)
+ case *ecdsa.PublicKey:
+ raw, err = fromEcPublicKey(key)
+ case *rsa.PublicKey:
+ raw = fromRsaPublicKey(key)
+ case ed25519.PrivateKey:
+ raw, err = fromEdPrivateKey(key)
+ case *ecdsa.PrivateKey:
+ raw, err = fromEcPrivateKey(key)
+ case *rsa.PrivateKey:
+ raw, err = fromRsaPrivateKey(key)
+ case []byte:
+ raw, err = fromSymmetricKey(key)
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ raw.Kid = k.KeyID
+ raw.Alg = k.Algorithm
+ raw.Use = k.Use
+
+ for _, cert := range k.Certificates {
+ raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
+ }
+
+ return json.Marshal(raw)
+}
+
+// UnmarshalJSON reads a key from its JSON representation.
+func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
+ var raw rawJSONWebKey
+ err = json.Unmarshal(data, &raw)
+ if err != nil {
+ return err
+ }
+
+ var key interface{}
+ switch raw.Kty {
+ case "EC":
+ if raw.D != nil {
+ key, err = raw.ecPrivateKey()
+ } else {
+ key, err = raw.ecPublicKey()
+ }
+ case "RSA":
+ if raw.D != nil {
+ key, err = raw.rsaPrivateKey()
+ } else {
+ key, err = raw.rsaPublicKey()
+ }
+ case "oct":
+ key, err = raw.symmetricKey()
+ case "OKP":
+ if raw.Crv == "Ed25519" && raw.X != nil {
+ if raw.D != nil {
+ key, err = raw.edPrivateKey()
+ } else {
+ key, err = raw.edPublicKey()
+ }
+ } else {
+			err = fmt.Errorf("square/go-jose: unknown curve '%s'", raw.Crv)
+ }
+ default:
+ err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
+ }
+
+ if err == nil {
+ *k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
+
+ k.Certificates, err = parseCertificateChain(raw.X5c)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal x5c field: %s", err)
+ }
+ }
+
+ return
+}
+
+// JSONWebKeySet represents a JWK Set object.
+type JSONWebKeySet struct {
+ Keys []JSONWebKey `json:"keys"`
+}
+
+// Key is a convenience method that returns keys matching the given key ID.
+// The specification states that a JWK Set "SHOULD" use distinct key IDs, but
+// allows for some cases where they are not distinct. Hence the method returns
+// a slice of JSONWebKeys.
+func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
+ var keys []JSONWebKey
+ for _, key := range s.Keys {
+ if key.KeyID == kid {
+ keys = append(keys, key)
+ }
+ }
+
+ return keys
+}
+
+const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
+const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
+const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
+
+func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
+ coordLength := curveSize(curve)
+ crv, err := curveName(curve)
+ if err != nil {
+ return "", err
+ }
+
+ if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
+ return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+ }
+
+ return fmt.Sprintf(ecThumbprintTemplate, crv,
+ newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
+ newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
+}
+
+func rsaThumbprintInput(n *big.Int, e int) (string, error) {
+ return fmt.Sprintf(rsaThumbprintTemplate,
+ newBufferFromInt(uint64(e)).base64(),
+ newBuffer(n.Bytes()).base64()), nil
+}
+
+func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
+ crv := "Ed25519"
+ if len(ed) > 32 {
+ return "", errors.New("square/go-jose: invalid elliptic key (too large)")
+ }
+ return fmt.Sprintf(edThumbprintTemplate, crv,
+ newFixedSizeBuffer(ed, 32).base64()), nil
+}
+
+// Thumbprint computes the JWK Thumbprint of a key using the
+// indicated hash algorithm.
+func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
+ var input string
+ var err error
+ switch key := k.Key.(type) {
+ case ed25519.PublicKey:
+ input, err = edThumbprintInput(key)
+ case *ecdsa.PublicKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *ecdsa.PrivateKey:
+ input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
+ case *rsa.PublicKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ case *rsa.PrivateKey:
+ input, err = rsaThumbprintInput(key.N, key.E)
+ case ed25519.PrivateKey:
+ input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
+ default:
+ return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ h := hash.New()
+ h.Write([]byte(input))
+ return h.Sum(nil), nil
+}
+
+// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
+func (k *JSONWebKey) IsPublic() bool {
+ switch k.Key.(type) {
+ case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
+ return true
+ default:
+ return false
+ }
+}
+
+// Public creates a JSONWebKey with the corresponding public key if the JWK represents an asymmetric private key.
+func (k *JSONWebKey) Public() JSONWebKey {
+ if k.IsPublic() {
+ return *k
+ }
+ ret := *k
+ switch key := k.Key.(type) {
+ case *ecdsa.PrivateKey:
+ ret.Key = key.Public()
+ case *rsa.PrivateKey:
+ ret.Key = key.Public()
+ case ed25519.PrivateKey:
+ ret.Key = key.Public()
+ default:
+ return JSONWebKey{} // returning invalid key
+ }
+ return ret
+}
+
+// Valid checks that the key contains the expected parameters.
+func (k *JSONWebKey) Valid() bool {
+ if k.Key == nil {
+ return false
+ }
+ switch key := k.Key.(type) {
+ case *ecdsa.PublicKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil {
+ return false
+ }
+ case *ecdsa.PrivateKey:
+ if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
+ return false
+ }
+ case *rsa.PublicKey:
+ if key.N == nil || key.E == 0 {
+ return false
+ }
+ case *rsa.PrivateKey:
+ if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
+ return false
+ }
+ case ed25519.PublicKey:
+ if len(key) != 32 {
+ return false
+ }
+ case ed25519.PrivateKey:
+ if len(key) != 64 {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
+ if key.N == nil || key.E == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
+ }
+
+ return &rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ }, nil
+}
+
+func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
+ return &rawJSONWebKey{
+ Kty: "OKP",
+ Crv: "Ed25519",
+ X: newBuffer(pub),
+ }
+}
+
+func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
+ return &rawJSONWebKey{
+ Kty: "RSA",
+ N: newBuffer(pub.N.Bytes()),
+ E: newBufferFromInt(uint64(pub.E)),
+ }
+}
+
+func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil {
+ return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
+ }
+
+ // The length of this octet string MUST be the full size of a coordinate for
+ // the curve specified in the "crv" parameter.
+ // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
+ if curveSize(curve) != len(key.X.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x")
+ }
+
+ if curveSize(curve) != len(key.Y.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ }, nil
+}
+
+func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
+ if pub == nil || pub.X == nil || pub.Y == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
+ }
+
+ name, err := curveName(pub.Curve)
+ if err != nil {
+ return nil, err
+ }
+
+ size := curveSize(pub.Curve)
+
+ xBytes := pub.X.Bytes()
+ yBytes := pub.Y.Bytes()
+
+ if len(xBytes) > size || len(yBytes) > size {
+ return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
+ }
+
+ key := &rawJSONWebKey{
+ Kty: "EC",
+ Crv: name,
+ X: newFixedSizeBuffer(xBytes, size),
+ Y: newFixedSizeBuffer(yBytes, size),
+ }
+
+ return key, nil
+}
+
+func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
+ var missing []string
+ switch {
+ case key.D == nil:
+ missing = append(missing, "D")
+ case key.X == nil:
+ missing = append(missing, "X")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
+ }
+
+ privateKey := make([]byte, ed25519.PrivateKeySize)
+ copy(privateKey[0:32], key.D.bytes())
+ copy(privateKey[32:], key.X.bytes())
+ rv := ed25519.PrivateKey(privateKey)
+ return rv, nil
+}
+
+func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
+ if key.X == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value")
+ }
+ publicKey := make([]byte, ed25519.PublicKeySize)
+ copy(publicKey[0:32], key.X.bytes())
+ rv := ed25519.PublicKey(publicKey)
+ return rv, nil
+}
+
+func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
+ var missing []string
+ switch {
+ case key.N == nil:
+ missing = append(missing, "N")
+ case key.E == nil:
+ missing = append(missing, "E")
+ case key.D == nil:
+ missing = append(missing, "D")
+ case key.P == nil:
+ missing = append(missing, "P")
+ case key.Q == nil:
+ missing = append(missing, "Q")
+ }
+
+ if len(missing) > 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
+ }
+
+ rv := &rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ N: key.N.bigInt(),
+ E: key.E.toInt(),
+ },
+ D: key.D.bigInt(),
+ Primes: []*big.Int{
+ key.P.bigInt(),
+ key.Q.bigInt(),
+ },
+ }
+
+ if key.Dp != nil {
+ rv.Precomputed.Dp = key.Dp.bigInt()
+ }
+ if key.Dq != nil {
+ rv.Precomputed.Dq = key.Dq.bigInt()
+ }
+ if key.Qi != nil {
+ rv.Precomputed.Qinv = key.Qi.bigInt()
+ }
+
+ err := rv.Validate()
+ return rv, err
+}
+
+func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
+ raw := fromEdPublicKey(ed25519.PublicKey(ed[32:]))
+
+ raw.D = newBuffer(ed[0:32])
+ return raw, nil
+}
+
+func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
+ if len(rsa.Primes) != 2 {
+ return nil, ErrUnsupportedKeyType
+ }
+
+ raw := fromRsaPublicKey(&rsa.PublicKey)
+
+ raw.D = newBuffer(rsa.D.Bytes())
+ raw.P = newBuffer(rsa.Primes[0].Bytes())
+ raw.Q = newBuffer(rsa.Primes[1].Bytes())
+
+ if rsa.Precomputed.Dp != nil {
+ raw.Dp = newBuffer(rsa.Precomputed.Dp.Bytes())
+ }
+ if rsa.Precomputed.Dq != nil {
+ raw.Dq = newBuffer(rsa.Precomputed.Dq.Bytes())
+ }
+ if rsa.Precomputed.Qinv != nil {
+ raw.Qi = newBuffer(rsa.Precomputed.Qinv.Bytes())
+ }
+
+ return raw, nil
+}
+
+func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
+ var curve elliptic.Curve
+ switch key.Crv {
+ case "P-256":
+ curve = elliptic.P256()
+ case "P-384":
+ curve = elliptic.P384()
+ case "P-521":
+ curve = elliptic.P521()
+ default:
+ return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
+ }
+
+ if key.X == nil || key.Y == nil || key.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
+ }
+
+ // The length of this octet string MUST be the full size of a coordinate for
+ // the curve specified in the "crv" parameter.
+ // https://tools.ietf.org/html/rfc7518#section-6.2.1.2
+ if curveSize(curve) != len(key.X.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x")
+ }
+
+ if curveSize(curve) != len(key.Y.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y")
+ }
+
+ // https://tools.ietf.org/html/rfc7518#section-6.2.2.1
+ if dSize(curve) != len(key.D.data) {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for d")
+ }
+
+ x := key.X.bigInt()
+ y := key.Y.bigInt()
+
+ if !curve.IsOnCurve(x, y) {
+ return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
+ }
+
+ return &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: curve,
+ X: x,
+ Y: y,
+ },
+ D: key.D.bigInt(),
+ }, nil
+}
+
+func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
+ raw, err := fromEcPublicKey(&ec.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if ec.D == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid EC private key")
+ }
+
+ raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))
+
+ return raw, nil
+}
+
+// dSize returns the size in octets for the "d" member of an elliptic curve
+// private key.
+// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+// octets (where n is the order of the curve).
+// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size = size + 1
+ }
+ return size
+}
+
+func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
+ return &rawJSONWebKey{
+ Kty: "oct",
+ K: newBuffer(key),
+ }, nil
+}
+
+func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
+ if key.K == nil {
+ return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
+ }
+ return key.K.bytes(), nil
+}
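A short sketch of the JWK round trip defined above: wrap a native Go key in JSONWebKey, derive the public JWK, marshal it, and compute its RFC 7638 thumbprint. Key ID and algorithm values are illustrative.

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/base64"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	jwk := jose.JSONWebKey{Key: priv, KeyID: "example-key", Algorithm: "ES256"}
	pub := jwk.Public() // strips the private part, keeps kid/alg/use

	data, err := pub.MarshalJSON()
	if err != nil {
		panic(err)
	}
	thumb, err := pub.Thumbprint(crypto.SHA256) // RFC 7638 thumbprint
	if err != nil {
		panic(err)
	}
	fmt.Printf("jwk: %s\nthumbprint: %s\n", data, base64.RawURLEncoding.EncodeToString(thumb))
}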
diff --git a/vendor/gopkg.in/square/go-jose.v2/jws.go b/vendor/gopkg.in/square/go-jose.v2/jws.go
new file mode 100644
index 000000000..8b59b6ab2
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jws.go
@@ -0,0 +1,321 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strings"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
+type rawJSONWebSignature struct {
+ Payload *byteBuffer `json:"payload,omitempty"`
+ Signatures []rawSignatureInfo `json:"signatures,omitempty"`
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
+type rawSignatureInfo struct {
+ Protected *byteBuffer `json:"protected,omitempty"`
+ Header *rawHeader `json:"header,omitempty"`
+ Signature *byteBuffer `json:"signature,omitempty"`
+}
+
+// JSONWebSignature represents a signed JWS object after parsing.
+type JSONWebSignature struct {
+ payload []byte
+ // Signatures attached to this object (may be more than one for multi-sig).
+ // Be careful about accessing these directly, prefer to use Verify() or
+ // VerifyMulti() to ensure that the data you're getting is verified.
+ Signatures []Signature
+}
+
+// Signature represents a single signature over the JWS payload and protected header.
+type Signature struct {
+ // Merged header fields. Contains both protected and unprotected header
+ // values. Prefer using Protected and Unprotected fields instead of this.
+ // Values in this header may or may not have been signed and in general
+ // should not be trusted.
+ Header Header
+
+ // Protected header. Values in this header were signed and
+ // will be verified as part of the signature verification process.
+ Protected Header
+
+ // Unprotected header. Values in this header were not signed
+ // and in general should not be trusted.
+ Unprotected Header
+
+ // The actual signature value
+ Signature []byte
+
+ protected *rawHeader
+ header *rawHeader
+ original *rawSignatureInfo
+}
+
+// ParseSigned parses a signed message in compact or full serialization format.
+func ParseSigned(input string) (*JSONWebSignature, error) {
+ input = stripWhitespace(input)
+ if strings.HasPrefix(input, "{") {
+ return parseSignedFull(input)
+ }
+
+ return parseSignedCompact(input)
+}
+
+// Get a header value
+func (sig Signature) mergedHeaders() rawHeader {
+ out := rawHeader{}
+ out.merge(sig.protected)
+ out.merge(sig.header)
+ return out
+}
+
+// Compute data to be signed
+func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) []byte {
+ var serializedProtected string
+
+ if signature.original != nil && signature.original.Protected != nil {
+ serializedProtected = signature.original.Protected.base64()
+ } else if signature.protected != nil {
+ serializedProtected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(signature.protected))
+ } else {
+ serializedProtected = ""
+ }
+
+ return []byte(fmt.Sprintf("%s.%s",
+ serializedProtected,
+ base64.RawURLEncoding.EncodeToString(payload)))
+}
+
+// parseSignedFull parses a message in full format.
+func parseSignedFull(input string) (*JSONWebSignature, error) {
+ var parsed rawJSONWebSignature
+ err := json.Unmarshal([]byte(input), &parsed)
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed.sanitized()
+}
+
+// sanitized produces a cleaned-up JWS object from the raw JSON.
+func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
+ if parsed.Payload == nil {
+ return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
+ }
+
+ obj := &JSONWebSignature{
+ payload: parsed.Payload.bytes(),
+ Signatures: make([]Signature, len(parsed.Signatures)),
+ }
+
+ if len(parsed.Signatures) == 0 {
+ // No signatures array, must be flattened serialization
+ signature := Signature{}
+ if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
+ signature.protected = &rawHeader{}
+ err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if parsed.Header != nil && parsed.Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ signature.header = parsed.Header
+ signature.Signature = parsed.Signature.bytes()
+ // Make a fake "original" rawSignatureInfo to store the unprocessed
+ // Protected header. This is necessary because the Protected header can
+ // contain arbitrary fields not registered as part of the spec. See
+ // https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
+ // If we unmarshal Protected into a rawHeader with its explicit list of fields,
+ // we cannot marshal losslessly. So we have to keep around the original bytes.
+ // This is used in computeAuthData, which will first attempt to use
+ // the original bytes of a protected header, and fall back on marshaling the
+ // header struct only if those bytes are not available.
+ signature.original = &rawSignatureInfo{
+ Protected: parsed.Protected,
+ Header: parsed.Header,
+ Signature: parsed.Signature,
+ }
+
+ var err error
+ signature.Header, err = signature.mergedHeaders().sanitized()
+ if err != nil {
+ return nil, err
+ }
+
+ if signature.header != nil {
+ signature.Unprotected, err = signature.header.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if signature.protected != nil {
+ signature.Protected, err = signature.protected.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := signature.Header.JSONWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ obj.Signatures = append(obj.Signatures, signature)
+ }
+
+ for i, sig := range parsed.Signatures {
+ if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
+ obj.Signatures[i].protected = &rawHeader{}
+ err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check that there is not a nonce in the unprotected header
+ if sig.Header != nil && sig.Header.getNonce() != "" {
+ return nil, ErrUnprotectedNonce
+ }
+
+ var err error
+ obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
+ if err != nil {
+ return nil, err
+ }
+
+ if obj.Signatures[i].header != nil {
+ obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if obj.Signatures[i].protected != nil {
+ obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ obj.Signatures[i].Signature = sig.Signature.bytes()
+
+ // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
+ jwk := obj.Signatures[i].Header.JSONWebKey
+ if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
+ return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
+ }
+
+ // Copy value of sig
+ original := sig
+
+ obj.Signatures[i].header = sig.Header
+ obj.Signatures[i].original = &original
+ }
+
+ return obj, nil
+}
+
+// parseSignedCompact parses a message in compact format.
+func parseSignedCompact(input string) (*JSONWebSignature, error) {
+ parts := strings.Split(input, ".")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
+ }
+
+ rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ if err != nil {
+ return nil, err
+ }
+
+ payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return nil, err
+ }
+
+ raw := &rawJSONWebSignature{
+ Payload: newBuffer(payload),
+ Protected: newBuffer(rawProtected),
+ Signature: newBuffer(signature),
+ }
+ return raw.sanitized()
+}
+
+// CompactSerialize serializes an object using the compact serialization format.
+func (obj JSONWebSignature) CompactSerialize() (string, error) {
+ if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
+ return "", ErrNotSupported
+ }
+
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+
+ return fmt.Sprintf(
+ "%s.%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(obj.payload),
+ base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)), nil
+}
+
+// FullSerialize serializes an object using the full JSON serialization format.
+func (obj JSONWebSignature) FullSerialize() string {
+ raw := rawJSONWebSignature{
+ Payload: newBuffer(obj.payload),
+ }
+
+ if len(obj.Signatures) == 1 {
+ if obj.Signatures[0].protected != nil {
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+ raw.Protected = newBuffer(serializedProtected)
+ }
+ raw.Header = obj.Signatures[0].header
+ raw.Signature = newBuffer(obj.Signatures[0].Signature)
+ } else {
+ raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
+ for i, signature := range obj.Signatures {
+ raw.Signatures[i] = rawSignatureInfo{
+ Header: signature.header,
+ Signature: newBuffer(signature.Signature),
+ }
+
+ if signature.protected != nil {
+ raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
+ }
+ }
+ }
+
+ return string(mustSerializeJSON(raw))
+}
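The parsing and serialization functions above pair with Verify (defined in signing.go) on the consuming side. A hedged sketch; publicKey is assumed to be the signer's *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey, or a *JSONWebKey wrapping one of them.

package jwsexample

import (
	jose "gopkg.in/square/go-jose.v2"
)

// verifyCompact parses a compact or full JWS and returns the payload only if
// the signature verifies against publicKey.
func verifyCompact(serialized string, publicKey interface{}) ([]byte, error) {
	obj, err := jose.ParseSigned(serialized)
	if err != nil {
		return nil, err
	}
	// Only protected header values are covered by the signature; Verify
	// re-computes the signing input and checks it before returning the payload.
	return obj.Verify(publicKey)
}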
diff --git a/vendor/gopkg.in/square/go-jose.v2/opaque.go b/vendor/gopkg.in/square/go-jose.v2/opaque.go
new file mode 100644
index 000000000..4a8bd8f32
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/opaque.go
@@ -0,0 +1,83 @@
+/*-
+ * Copyright 2018 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+// OpaqueSigner is an interface that supports signing payloads with opaque
+ * private key(s). Private key operations performed by implementors may, for
+// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
+// transparently to the user of this interface.
+type OpaqueSigner interface {
+ // Public returns the public key of the current signing key.
+ Public() *JSONWebKey
+ // Algs returns a list of supported signing algorithms.
+ Algs() []SignatureAlgorithm
+ // SignPayload signs a payload with the current signing key using the given
+ // algorithm.
+ SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error)
+}
+
+type opaqueSigner struct {
+ signer OpaqueSigner
+}
+
+func newOpaqueSigner(alg SignatureAlgorithm, signer OpaqueSigner) (recipientSigInfo, error) {
+ var algSupported bool
+ for _, salg := range signer.Algs() {
+ if alg == salg {
+ algSupported = true
+ break
+ }
+ }
+ if !algSupported {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: alg,
+ publicKey: signer.Public,
+ signer: &opaqueSigner{
+ signer: signer,
+ },
+ }, nil
+}
+
+func (o *opaqueSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ out, err := o.signer.SignPayload(payload, alg)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// OpaqueVerifier is an interface that supports verifying payloads with opaque
+// public key(s). Implementations may rotate verification keys transparently to
+// the user of this interface.
+type OpaqueVerifier interface {
+ VerifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
+}
+
+type opaqueVerifier struct {
+ verifier OpaqueVerifier
+}
+
+func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ return o.verifier.VerifyPayload(payload, signature, alg)
+}
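A sketch of implementing the OpaqueSigner interface above with an in-memory Ed25519 key; a real implementation would delegate SignPayload to an HSM or remote KMS. Type and field names are illustrative, and the assumption is that recent go-jose v2 releases accept an OpaqueSigner directly as the Key of a SigningKey (newOpaqueSigner above is the hook for that path).

package main

import (
	"crypto/rand"

	"golang.org/x/crypto/ed25519"
	jose "gopkg.in/square/go-jose.v2"
)

// memorySigner keeps the key in memory purely for illustration.
type memorySigner struct {
	key ed25519.PrivateKey
	kid string
}

func (s *memorySigner) Public() *jose.JSONWebKey {
	return &jose.JSONWebKey{Key: s.key.Public(), KeyID: s.kid, Algorithm: string(jose.EdDSA)}
}

func (s *memorySigner) Algs() []jose.SignatureAlgorithm {
	return []jose.SignatureAlgorithm{jose.EdDSA}
}

func (s *memorySigner) SignPayload(payload []byte, alg jose.SignatureAlgorithm) ([]byte, error) {
	if alg != jose.EdDSA {
		return nil, jose.ErrUnsupportedAlgorithm
	}
	return ed25519.Sign(s.key, payload), nil
}

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	_, err = jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: &memorySigner{key: priv, kid: "opaque-1"}}, nil)
	if err != nil {
		panic(err)
	}
}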
diff --git a/vendor/gopkg.in/square/go-jose.v2/shared.go b/vendor/gopkg.in/square/go-jose.v2/shared.go
new file mode 100644
index 000000000..b0a6255ec
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/shared.go
@@ -0,0 +1,499 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/elliptic"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// KeyAlgorithm represents a key management algorithm.
+type KeyAlgorithm string
+
+// SignatureAlgorithm represents a signature (or MAC) algorithm.
+type SignatureAlgorithm string
+
+// ContentEncryption represents a content encryption algorithm.
+type ContentEncryption string
+
+// CompressionAlgorithm represents an algorithm used for plaintext compression.
+type CompressionAlgorithm string
+
+// ContentType represents the type of the contained data.
+type ContentType string
+
+var (
+ // ErrCryptoFailure represents an error in cryptographic primitive. This
+ // occurs when, for example, a message had an invalid authentication tag or
+ // could not be decrypted.
+ ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
+
+ // ErrUnsupportedAlgorithm indicates that a selected algorithm is not
+ // supported. This occurs when trying to instantiate an encrypter for an
+ // algorithm that is not yet implemented.
+ ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
+
+ // ErrUnsupportedKeyType indicates that the given key type/format is not
+ // supported. This occurs when trying to instantiate an encrypter and passing
+ // it a key of an unrecognized type or with unsupported parameters, such as
+ // an RSA private key with more than two primes.
+ ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
+
+ // ErrInvalidKeySize indicates that the given key is not the correct size
+ // for the selected algorithm. This can occur, for example, when trying to
+ // encrypt with AES-256 but passing only a 128-bit key as input.
+ ErrInvalidKeySize = errors.New("square/go-jose: invalid key size for algorithm")
+
+	// ErrNotSupported indicates that serialization of the object is not supported. This occurs when
+ // trying to compact-serialize an object which can't be represented in
+ // compact form.
+ ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
+
+ // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
+ // nonce header parameter was included in an unprotected header object.
+ ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
+)
+
+// Key management algorithms
+const (
+ ED25519 = KeyAlgorithm("ED25519")
+ RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
+ RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
+ RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
+ A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
+ A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
+ A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
+ DIRECT = KeyAlgorithm("dir") // Direct encryption
+ ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
+ ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
+ ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
+ ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
+ A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
+ A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
+ A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
+ PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
+ PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
+ PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
+)
+
+// Signature algorithms
+const (
+ EdDSA = SignatureAlgorithm("EdDSA")
+ HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
+ HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
+ HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
+ RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
+ RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
+ RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
+ ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
+ ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
+ ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
+ PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
+ PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
+ PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
+)
+
+// Content encryption algorithms
+const (
+ A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
+ A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
+ A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
+ A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
+ A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
+ A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
+)
+
+// Compression algorithms
+const (
+ NONE = CompressionAlgorithm("") // No compression
+ DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
+)
+
+// HeaderKey represents a key in the protected header of a JWS object. Use of
+// the predefined Header... constants is preferred to enhance type safety.
+type HeaderKey string
+
+const (
+ HeaderType HeaderKey = "typ" // string
+ HeaderContentType = "cty" // string
+
+ // These are set by go-jose and shouldn't need to be set by consumers of the
+ // library.
+ headerAlgorithm = "alg" // string
+ headerEncryption = "enc" // ContentEncryption
+ headerCompression = "zip" // CompressionAlgorithm
+ headerCritical = "crit" // []string
+
+ headerAPU = "apu" // *byteBuffer
+ headerAPV = "apv" // *byteBuffer
+ headerEPK = "epk" // *JSONWebKey
+ headerIV = "iv" // *byteBuffer
+ headerTag = "tag" // *byteBuffer
+ headerX5c = "x5c" // []*x509.Certificate
+
+ headerJWK = "jwk" // *JSONWebKey
+ headerKeyID = "kid" // string
+ headerNonce = "nonce" // string
+
+ headerP2C = "p2c" // *byteBuffer (int)
+ headerP2S = "p2s" // *byteBuffer ([]byte)
+
+)
+
+// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
+//
+// The decoding of the constituent items is deferred because we want to marshal
+// some members into particular structs rather than generic maps, but at the
+// same time we need to receive any extra fields unhandled by this library to
+// pass through to consuming code in case it wants to examine them.
+type rawHeader map[HeaderKey]*json.RawMessage
+
+// Header represents the read-only JOSE header for JWE/JWS objects.
+type Header struct {
+ KeyID string
+ JSONWebKey *JSONWebKey
+ Algorithm string
+ Nonce string
+
+ // Unverified certificate chain parsed from x5c header.
+ certificates []*x509.Certificate
+
+ // Any headers not recognised above get unmarshaled
+ // from JSON in a generic manner and placed in this map.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// Certificates verifies & returns the certificate chain present
+// in the x5c header field of a message, if one was present. Returns
+// an error if there was no x5c header present or the chain could
+// not be validated with the given verify options.
+func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
+ if len(h.certificates) == 0 {
+ return nil, errors.New("square/go-jose: no x5c header present in message")
+ }
+
+ leaf := h.certificates[0]
+ if opts.Intermediates == nil {
+ opts.Intermediates = x509.NewCertPool()
+ for _, intermediate := range h.certificates[1:] {
+ opts.Intermediates.AddCert(intermediate)
+ }
+ }
+
+ return leaf.Verify(opts)
+}
+
+func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+
+ parsed[k] = makeRawMessage(b)
+ return nil
+}
+
+// getString gets a string from the raw JSON, defaulting to "".
+func (parsed rawHeader) getString(k HeaderKey) string {
+ v, ok := parsed[k]
+ if !ok || v == nil {
+ return ""
+ }
+ var s string
+ err := json.Unmarshal(*v, &s)
+ if err != nil {
+ return ""
+ }
+ return s
+}
+
+// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
+// not specified.
+func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
+ v := parsed[k]
+ if v == nil {
+ return nil, nil
+ }
+ var bb *byteBuffer
+ err := json.Unmarshal(*v, &bb)
+ if err != nil {
+ return nil, err
+ }
+ return bb, nil
+}
+
+// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
+func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
+ return KeyAlgorithm(parsed.getString(headerAlgorithm))
+}
+
+// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
+func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
+ return SignatureAlgorithm(parsed.getString(headerAlgorithm))
+}
+
+// getEncryption extracts parsed "enc" from the raw JSON.
+func (parsed rawHeader) getEncryption() ContentEncryption {
+ return ContentEncryption(parsed.getString(headerEncryption))
+}
+
+// getCompression extracts parsed "zip" from the raw JSON.
+func (parsed rawHeader) getCompression() CompressionAlgorithm {
+ return CompressionAlgorithm(parsed.getString(headerCompression))
+}
+
+func (parsed rawHeader) getNonce() string {
+ return parsed.getString(headerNonce)
+}
+
+// getEPK extracts parsed "epk" from the raw JSON.
+func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
+ v := parsed[headerEPK]
+ if v == nil {
+ return nil, nil
+ }
+ var epk *JSONWebKey
+ err := json.Unmarshal(*v, &epk)
+ if err != nil {
+ return nil, err
+ }
+ return epk, nil
+}
+
+// getAPU extracts parsed "apu" from the raw JSON.
+func (parsed rawHeader) getAPU() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerAPU)
+}
+
+// getAPV extracts parsed "apv" from the raw JSON.
+func (parsed rawHeader) getAPV() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerAPV)
+}
+
+// getIV extracts parsed "iv" from the raw JSON.
+func (parsed rawHeader) getIV() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerIV)
+}
+
+// getTag extracts parsed "tag" from the raw JSON.
+func (parsed rawHeader) getTag() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerTag)
+}
+
+// getJWK extracts parsed "jwk" from the raw JSON.
+func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
+ v := parsed[headerJWK]
+ if v == nil {
+ return nil, nil
+ }
+ var jwk *JSONWebKey
+ err := json.Unmarshal(*v, &jwk)
+ if err != nil {
+ return nil, err
+ }
+ return jwk, nil
+}
+
+// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
+// returns an empty slice.
+func (parsed rawHeader) getCritical() ([]string, error) {
+ v := parsed[headerCritical]
+ if v == nil {
+ return nil, nil
+ }
+
+ var q []string
+ err := json.Unmarshal(*v, &q)
+ if err != nil {
+ return nil, err
+ }
+ return q, nil
+}
+
+// getP2C extracts parsed "p2c" from the raw JSON.
+func (parsed rawHeader) getP2C() (int, error) {
+ v := parsed[headerP2C]
+ if v == nil {
+ return 0, nil
+ }
+
+ var p2c int
+ err := json.Unmarshal(*v, &p2c)
+ if err != nil {
+ return 0, err
+ }
+ return p2c, nil
+}
+
+// getP2S extracts parsed "p2s" from the raw JSON.
+func (parsed rawHeader) getP2S() (*byteBuffer, error) {
+ return parsed.getByteBuffer(headerP2S)
+}
+
+// sanitized produces a cleaned-up header object from the raw JSON.
+func (parsed rawHeader) sanitized() (h Header, err error) {
+ for k, v := range parsed {
+ if v == nil {
+ continue
+ }
+ switch k {
+ case headerJWK:
+ var jwk *JSONWebKey
+ err = json.Unmarshal(*v, &jwk)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
+ return
+ }
+ h.JSONWebKey = jwk
+ case headerKeyID:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
+ return
+ }
+ h.KeyID = s
+ case headerAlgorithm:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
+ return
+ }
+ h.Algorithm = s
+ case headerNonce:
+ var s string
+ err = json.Unmarshal(*v, &s)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
+ return
+ }
+ h.Nonce = s
+ case headerX5c:
+ c := []string{}
+ err = json.Unmarshal(*v, &c)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
+ return
+ }
+ h.certificates, err = parseCertificateChain(c)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
+ return
+ }
+ default:
+ if h.ExtraHeaders == nil {
+ h.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ var v2 interface{}
+ err = json.Unmarshal(*v, &v2)
+ if err != nil {
+ err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
+ return
+ }
+ h.ExtraHeaders[k] = v2
+ }
+ }
+ return
+}
+
+func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
+ out := make([]*x509.Certificate, len(chain))
+ for i, cert := range chain {
+ raw, err := base64.StdEncoding.DecodeString(cert)
+ if err != nil {
+ return nil, err
+ }
+ out[i], err = x509.ParseCertificate(raw)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+func (dst rawHeader) isSet(k HeaderKey) bool {
+ dvr := dst[k]
+ if dvr == nil {
+ return false
+ }
+
+ var dv interface{}
+ err := json.Unmarshal(*dvr, &dv)
+ if err != nil {
+ return true
+ }
+
+ if dvStr, ok := dv.(string); ok {
+ return dvStr != ""
+ }
+
+ return true
+}
+
+// Merge headers from src into dst, giving precedence to headers already set in dst.
+func (dst rawHeader) merge(src *rawHeader) {
+ if src == nil {
+ return
+ }
+
+ for k, v := range *src {
+ if dst.isSet(k) {
+ continue
+ }
+
+ dst[k] = v
+ }
+}
+
+// Get JOSE name of curve
+func curveName(crv elliptic.Curve) (string, error) {
+ switch crv {
+ case elliptic.P256():
+ return "P-256", nil
+ case elliptic.P384():
+ return "P-384", nil
+ case elliptic.P521():
+ return "P-521", nil
+ default:
+ return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
+ }
+}
+
+// Get size of curve in bytes
+func curveSize(crv elliptic.Curve) int {
+ bits := crv.Params().BitSize
+
+ div := bits / 8
+ mod := bits % 8
+
+ if mod == 0 {
+ return div
+ }
+
+ return div + 1
+}
+
+func makeRawMessage(b []byte) *json.RawMessage {
+ rm := json.RawMessage(b)
+ return &rm
+}
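The read-only Header type above is what callers typically use to route key lookup. A small sketch that picks a verification key out of a JWK Set by the kid advertised in the merged header (illustrative helper, not part of the library):

package headerexample

import (
	"errors"

	jose "gopkg.in/square/go-jose.v2"
)

// keyForSignature returns the first key in the set whose key ID matches one of
// the parsed signatures. Remember that unprotected header values, including
// kid, are not covered by the signature.
func keyForSignature(obj *jose.JSONWebSignature, set *jose.JSONWebKeySet) (*jose.JSONWebKey, error) {
	for _, sig := range obj.Signatures {
		if keys := set.Key(sig.Header.KeyID); len(keys) > 0 {
			return &keys[0], nil
		}
	}
	return nil, errors.New("no key in set matches any signature kid")
}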
diff --git a/vendor/gopkg.in/square/go-jose.v2/signing.go b/vendor/gopkg.in/square/go-jose.v2/signing.go
new file mode 100644
index 000000000..be6cf0481
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/signing.go
@@ -0,0 +1,389 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/base64"
+ "errors"
+ "fmt"
+
+ "golang.org/x/crypto/ed25519"
+
+ "gopkg.in/square/go-jose.v2/json"
+)
+
+// NonceSource represents a source of random nonces to go into JWS objects
+type NonceSource interface {
+ Nonce() (string, error)
+}
+
+// Signer represents a signer which takes a payload and produces a signed JWS object.
+type Signer interface {
+ Sign(payload []byte) (*JSONWebSignature, error)
+ Options() SignerOptions
+}
+
+// SigningKey represents an algorithm/key used to sign a message.
+type SigningKey struct {
+ Algorithm SignatureAlgorithm
+ Key interface{}
+}
+
+// SignerOptions represents options that can be set when creating signers.
+type SignerOptions struct {
+ NonceSource NonceSource
+ EmbedJWK bool
+
+ // Optional map of additional keys to be inserted into the protected header
+ // of a JWS object. Some specifications which make use of JWS like to insert
+ // additional values here. All values must be JSON-serializable.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
+ if so.ExtraHeaders == nil {
+ so.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ so.ExtraHeaders[k] = v
+ return so
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// SignerOptions.
+func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
+ return so.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated SignerOptions.
+func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
+ return so.WithHeader(HeaderType, typ)
+}
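+
+// The options above compose fluently; an illustrative sketch of caller code
+// (not part of the upstream source):
+//
+//	opts := (&SignerOptions{EmbedJWK: true}).
+//		WithType("JWT").
+//		WithHeader("url", "https://example.com/acme/new-order")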
+
+type payloadSigner interface {
+ signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
+}
+
+type payloadVerifier interface {
+ verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
+}
+
+type genericSigner struct {
+ recipients []recipientSigInfo
+ nonceSource NonceSource
+ embedJWK bool
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientSigInfo struct {
+ sigAlg SignatureAlgorithm
+ publicKey func() *JSONWebKey
+ signer payloadSigner
+}
+
+func staticPublicKey(jwk *JSONWebKey) func() *JSONWebKey {
+ return func() *JSONWebKey {
+ return jwk
+ }
+}
+
+// NewSigner creates an appropriate signer based on the key type
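+// (an *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey, []byte HMAC
+// key, a JSONWebKey wrapping one of these, or an OpaqueSigner).
+//
+// Minimal usage sketch (hypothetical caller code, assuming an
+// *rsa.PrivateKey named priv; not part of the upstream source):
+//
+//	signer, err := NewSigner(SigningKey{Algorithm: RS256, Key: priv}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	jws, err := signer.Sign([]byte("payload"))
+//	if err != nil {
+//		// handle error
+//	}
+//	compact, err := jws.CompactSerialize()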
+func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
+ return NewMultiSigner([]SigningKey{sig}, opts)
+}
+
+// NewMultiSigner creates a signer for multiple recipients
+func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
+ signer := &genericSigner{recipients: []recipientSigInfo{}}
+
+ if opts != nil {
+ signer.nonceSource = opts.NonceSource
+ signer.embedJWK = opts.EmbedJWK
+ signer.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, sig := range sigs {
+ err := signer.addRecipient(sig.Algorithm, sig.Key)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return signer, nil
+}
+
+// newVerifier creates a verifier based on the key type
+func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
+ switch verificationKey := verificationKey.(type) {
+ case ed25519.PublicKey:
+ return &edEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case *rsa.PublicKey:
+ return &rsaEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case *ecdsa.PublicKey:
+ return &ecEncrypterVerifier{
+ publicKey: verificationKey,
+ }, nil
+ case []byte:
+ return &symmetricMac{
+ key: verificationKey,
+ }, nil
+ case JSONWebKey:
+ return newVerifier(verificationKey.Key)
+ case *JSONWebKey:
+ return newVerifier(verificationKey.Key)
+ }
+ if ov, ok := verificationKey.(OpaqueVerifier); ok {
+ return &opaqueVerifier{verifier: ov}, nil
+ }
+ return nil, ErrUnsupportedKeyType
+}
+
+func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
+ recipient, err := makeJWSRecipient(alg, signingKey)
+ if err != nil {
+ return err
+ }
+
+ ctx.recipients = append(ctx.recipients, recipient)
+ return nil
+}
+
+func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
+ switch signingKey := signingKey.(type) {
+ case ed25519.PrivateKey:
+ return newEd25519Signer(alg, signingKey)
+ case *rsa.PrivateKey:
+ return newRSASigner(alg, signingKey)
+ case *ecdsa.PrivateKey:
+ return newECDSASigner(alg, signingKey)
+ case []byte:
+ return newSymmetricSigner(alg, signingKey)
+ case JSONWebKey:
+ return newJWKSigner(alg, signingKey)
+ case *JSONWebKey:
+ return newJWKSigner(alg, *signingKey)
+ }
+ if signer, ok := signingKey.(OpaqueSigner); ok {
+ return newOpaqueSigner(alg, signer)
+ }
+ return recipientSigInfo{}, ErrUnsupportedKeyType
+}
+
+func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
+ recipient, err := makeJWSRecipient(alg, signingKey.Key)
+ if err != nil {
+ return recipientSigInfo{}, err
+ }
+ if recipient.publicKey != nil && recipient.publicKey() != nil {
+ // recipient.publicKey is a JWK synthesized for embedding when the recipientSigInfo
+ // was created for the inner key (such as an RSA or ECDSA public key). It contains
+ // the public key for embedding, but lacks extra parameters such as the key ID.
+ publicKey := signingKey
+ publicKey.Key = recipient.publicKey().Key
+ recipient.publicKey = staticPublicKey(&publicKey)
+
+ // This should be impossible, but let's check anyway.
+ if !recipient.publicKey().IsPublic() {
+ return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public")
+ }
+ }
+ return recipient, nil
+}
+
+func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
+ obj := &JSONWebSignature{}
+ obj.payload = payload
+ obj.Signatures = make([]Signature, len(ctx.recipients))
+
+ for i, recipient := range ctx.recipients {
+ protected := map[HeaderKey]interface{}{
+ headerAlgorithm: string(recipient.sigAlg),
+ }
+
+ if recipient.publicKey != nil && recipient.publicKey() != nil {
+ // We want to embed the JWK or set the kid header, but not both. Having a protected
+ // header that contains an embedded JWK while also simultaneously containing the kid
+ // header is confusing, and at least in ACME the two are considered to be mutually
+ // exclusive. The fact that both can exist at the same time is a somewhat unfortunate
+ // result of the JOSE spec. We've decided that this library will only include one or
+ // the other to avoid this confusion.
+ //
+ // See https://github.com/square/go-jose/issues/157 for more context.
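+ //
+ // Illustratively, with EmbedJWK set the protected header carries
+ // {"alg":"...","jwk":{...}}, otherwise {"alg":"...","kid":"..."}.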
+ if ctx.embedJWK {
+ protected[headerJWK] = recipient.publicKey()
+ } else {
+ protected[headerKeyID] = recipient.publicKey().KeyID
+ }
+ }
+
+ if ctx.nonceSource != nil {
+ nonce, err := ctx.nonceSource.Nonce()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err)
+ }
+ protected[headerNonce] = nonce
+ }
+
+ for k, v := range ctx.extraHeaders {
+ protected[k] = v
+ }
+
+ serializedProtected := mustSerializeJSON(protected)
+
+ input := []byte(fmt.Sprintf("%s.%s",
+ base64.RawURLEncoding.EncodeToString(serializedProtected),
+ base64.RawURLEncoding.EncodeToString(payload)))
+
+ signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ signatureInfo.protected = &rawHeader{}
+ for k, v := range protected {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err)
+ }
+ (*signatureInfo.protected)[k] = makeRawMessage(b)
+ }
+ obj.Signatures[i] = signatureInfo
+ }
+
+ return obj, nil
+}
+
+func (ctx *genericSigner) Options() SignerOptions {
+ return SignerOptions{
+ NonceSource: ctx.nonceSource,
+ EmbedJWK: ctx.embedJWK,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Verify validates the signature on the object and returns the payload.
+// This function does not support multi-signature; if you need to verify an
+// object carrying multiple signatures, use VerifyMulti instead.
+//
+// Be careful when verifying signatures based on embedded JWKs inside the
+// payload header. You cannot assume that the key received in a payload is
+// trusted.
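+//
+// Minimal usage sketch (hypothetical caller code; ParseSigned is declared
+// elsewhere in this package, pub is the signer's public key):
+//
+//	obj, err := ParseSigned(compact)
+//	if err != nil {
+//		// handle error
+//	}
+//	payload, err := obj.Verify(pub)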
+func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
+ err := obj.DetachedVerify(obj.payload, verificationKey)
+ if err != nil {
+ return nil, err
+ }
+ return obj.payload, nil
+}
+
+// UnsafePayloadWithoutVerification returns the payload without
+// verifying it. The content returned from this function cannot be
+// trusted.
+func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
+ return obj.payload
+}
+
+// DetachedVerify validates a detached signature on the given payload. In
+// most cases, you will probably want to use Verify instead. DetachedVerify
+// is only useful if you have a payload and signature that are separated from
+// each other.
+func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return err
+ }
+
+ if len(obj.Signatures) > 1 {
+ return errors.New("square/go-jose: too many signatures in payload; expecting only one")
+ }
+
+ signature := obj.Signatures[0]
+ headers := signature.mergedHeaders()
+ critical, err := headers.getCritical()
+ if err != nil {
+ return err
+ }
+ if len(critical) > 0 {
+ // Unsupported crit header
+ return ErrCryptoFailure
+ }
+
+ input := obj.computeAuthData(payload, &signature)
+ alg := headers.getSignatureAlgorithm()
+ err = verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return nil
+ }
+
+ return ErrCryptoFailure
+}
+
+// VerifyMulti validates (one of the multiple) signatures on the object and
+// returns the index of the signature that was verified, along with the signature
+// object and the payload. We return the signature and index to guarantee that
+// callers are getting the verified value.
+func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
+ idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
+ if err != nil {
+ return -1, Signature{}, nil, err
+ }
+ return idx, sig, obj.payload, nil
+}
+
+// DetachedVerifyMulti validates a detached signature on the given payload with
+// a signature/object that has potentially multiple signers. This returns the index
+// of the signature that was verified, along with the signature object. We return
+// the signature and index to guarantee that callers are getting the verified value.
+//
+// In most cases, you will probably want to use Verify or VerifyMulti instead.
+// DetachedVerifyMulti is only useful if you have a payload and signature that are
+// separated from each other, and the signature can have multiple signers at the
+// same time.
+func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
+ verifier, err := newVerifier(verificationKey)
+ if err != nil {
+ return -1, Signature{}, err
+ }
+
+ for i, signature := range obj.Signatures {
+ headers := signature.mergedHeaders()
+ critical, err := headers.getCritical()
+ if err != nil {
+ continue
+ }
+ if len(critical) > 0 {
+ // Unsupported crit header
+ continue
+ }
+
+ input := obj.computeAuthData(payload, &signature)
+ alg := headers.getSignatureAlgorithm()
+ err = verifier.verifyPayload(input, signature.Signature, alg)
+ if err == nil {
+ return i, signature, nil
+ }
+ }
+
+ return -1, Signature{}, ErrCryptoFailure
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/symmetric.go b/vendor/gopkg.in/square/go-jose.v2/symmetric.go
new file mode 100644
index 000000000..264a0fe37
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/symmetric.go
@@ -0,0 +1,482 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+
+ "golang.org/x/crypto/pbkdf2"
+ "gopkg.in/square/go-jose.v2/cipher"
+)
+
+// RandReader is the source of cryptographically secure random bytes used by this package (stubbed out in tests)
+var RandReader = rand.Reader
+
+const (
+ // RFC7518 recommends a minimum of 1,000 iterations:
+ // https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+ // NIST recommends a minimum of 10,000:
+ // https://pages.nist.gov/800-63-3/sp800-63b.html
+ // 1Password uses 100,000:
+ // https://support.1password.com/pbkdf2/
+ defaultP2C = 100000
+ // Default salt size: 128 bits
+ defaultP2SSize = 16
+)
+
+// Dummy key cipher for shared symmetric key mode
+type symmetricKeyCipher struct {
+ key []byte // Pre-shared content-encryption key
+ p2c int // PBES2 Count
+ p2s []byte // PBES2 Salt Input
+}
+
+// Signer/verifier for MAC modes
+type symmetricMac struct {
+ key []byte
+}
+
+// Input/output from an AEAD operation
+type aeadParts struct {
+ iv, ciphertext, tag []byte
+}
+
+// A content cipher based on an AEAD construction
+type aeadContentCipher struct {
+ keyBytes int
+ authtagBytes int
+ getAead func(key []byte) (cipher.AEAD, error)
+}
+
+// Random key generator
+type randomKeyGenerator struct {
+ size int
+}
+
+// Static key generator
+type staticKeyGenerator struct {
+ key []byte
+}
+
+// Create a new content cipher based on AES-GCM
+func newAESGCM(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize,
+ authtagBytes: 16,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ aes, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return cipher.NewGCM(aes)
+ },
+ }
+}
+
+// Create a new content cipher based on AES-CBC+HMAC
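+// (the combined key is twice the AES key size: half is used as the HMAC key
+// and half as the AES key, per RFC 7518 section 5.2.2)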
+func newAESCBC(keySize int) contentCipher {
+ return &aeadContentCipher{
+ keyBytes: keySize * 2,
+ authtagBytes: keySize,
+ getAead: func(key []byte) (cipher.AEAD, error) {
+ return josecipher.NewCBCHMAC(key, aes.NewCipher)
+ },
+ }
+}
+
+// Get an AEAD cipher object for the given content encryption algorithm
+func getContentCipher(alg ContentEncryption) contentCipher {
+ switch alg {
+ case A128GCM:
+ return newAESGCM(16)
+ case A192GCM:
+ return newAESGCM(24)
+ case A256GCM:
+ return newAESGCM(32)
+ case A128CBC_HS256:
+ return newAESCBC(16)
+ case A192CBC_HS384:
+ return newAESCBC(24)
+ case A256CBC_HS512:
+ return newAESCBC(32)
+ default:
+ return nil
+ }
+}
+
+// getPbkdf2Params returns the key length and hash function used in
+// pbkdf2.Key.
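+// The derived key length matches the AES key-wrap key size for the given
+// algorithm (16, 24 or 32 bytes), per RFC 7518 section 4.8.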
+func getPbkdf2Params(alg KeyAlgorithm) (int, func() hash.Hash) {
+ switch alg {
+ case PBES2_HS256_A128KW:
+ return 16, sha256.New
+ case PBES2_HS384_A192KW:
+ return 24, sha512.New384
+ case PBES2_HS512_A256KW:
+ return 32, sha512.New
+ default:
+ panic("invalid algorithm")
+ }
+}
+
+// getRandomSalt generates a new salt of the given size.
+func getRandomSalt(size int) ([]byte, error) {
+ salt := make([]byte, size)
+ _, err := io.ReadFull(RandReader, salt)
+ if err != nil {
+ return nil, err
+ }
+
+ return salt, nil
+}
+
+// newSymmetricRecipient creates a recipientKeyInfo based on the given shared
+// symmetric key, supporting direct encryption, AES key wrap, AES-GCM key wrap,
+// and PBES2 key derivation.
+func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
+ switch keyAlg {
+ case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &symmetricKeyCipher{
+ key: key,
+ },
+ }, nil
+}
+
+// newSymmetricSigner creates a recipientSigInfo based on the given key.
+func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
+ // Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case HS256, HS384, HS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ signer: &symmetricMac{
+ key: key,
+ },
+ }, nil
+}
+
+// Generate a random key for the given content cipher
+func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ key := make([]byte, ctx.size)
+ _, err := io.ReadFull(RandReader, key)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ return key, rawHeader{}, nil
+}
+
+// Key size for random generator
+func (ctx randomKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Generate a static key (for direct mode)
+func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, rawHeader{}, nil
+}
+
+// Key size for static generator
+func (ctx staticKeyGenerator) keySize() int {
+ return len(ctx.key)
+}
+
+// Get key size for this cipher
+func (ctx aeadContentCipher) keySize() int {
+ return ctx.keyBytes
+}
+
+// Encrypt some data
+func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
+ // Get a new AEAD instance
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize a new nonce
+ iv := make([]byte, aead.NonceSize())
+ _, err = io.ReadFull(RandReader, iv)
+ if err != nil {
+ return nil, err
+ }
+
+ ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
+ offset := len(ciphertextAndTag) - ctx.authtagBytes
+
+ return &aeadParts{
+ iv: iv,
+ ciphertext: ciphertextAndTag[:offset],
+ tag: ciphertextAndTag[offset:],
+ }, nil
+}
+
+// Decrypt some data
+func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
+ aead, err := ctx.getAead(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(parts.iv) != aead.NonceSize() || len(parts.tag) < ctx.authtagBytes {
+ return nil, ErrCryptoFailure
+ }
+
+ return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
+}
+
+// Encrypt the content encryption key.
+func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case DIRECT:
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ parts, err := aead.encrypt(ctx.key, []byte{}, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ header := &rawHeader{}
+ header.set(headerIV, newBuffer(parts.iv))
+ header.set(headerTag, newBuffer(parts.tag))
+
+ return recipientInfo{
+ header: header,
+ encryptedKey: parts.ciphertext,
+ }, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &rawHeader{},
+ }, nil
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if len(ctx.p2s) == 0 {
+ salt, err := getRandomSalt(defaultP2SSize)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+ ctx.p2s = salt
+ }
+
+ if ctx.p2c <= 0 {
+ ctx.p2c = defaultP2C
+ }
+
+ // salt is UTF8(Alg) || 0x00 || Salt Input
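+ // (e.g. for PBES2-HS256+A128KW the salt is the ASCII bytes of that name,
+ // a zero byte, then the raw salt input; see RFC 7518 section 4.8.1.1)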
+ salt := bytes.Join([][]byte{[]byte(alg), ctx.p2s}, []byte{0x00})
+
+ // derive key
+ keyLen, h := getPbkdf2Params(alg)
+ key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
+
+ // use AES cipher with derived key
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ header := &rawHeader{}
+ header.set(headerP2C, ctx.p2c)
+ header.set(headerP2S, newBuffer(ctx.p2s))
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: header,
+ }, nil
+ }
+
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the content encryption key.
+func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ switch headers.getAlgorithm() {
+ case DIRECT:
+ cek := make([]byte, len(ctx.key))
+ copy(cek, ctx.key)
+ return cek, nil
+ case A128GCMKW, A192GCMKW, A256GCMKW:
+ aead := newAESGCM(len(ctx.key))
+
+ iv, err := headers.getIV()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err)
+ }
+ tag, err := headers.getTag()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err)
+ }
+
+ parts := &aeadParts{
+ iv: iv.bytes(),
+ ciphertext: recipient.encryptedKey,
+ tag: tag.bytes(),
+ }
+
+ cek, err := aead.decrypt(ctx.key, []byte{}, parts)
+ if err != nil {
+ return nil, err
+ }
+
+ return cek, nil
+ case A128KW, A192KW, A256KW:
+ block, err := aes.NewCipher(ctx.key)
+ if err != nil {
+ return nil, err
+ }
+
+ cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return cek, nil
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ p2s, err := headers.getP2S()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid P2S: %v", err)
+ }
+ if p2s == nil || len(p2s.data) == 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid P2S: must be present")
+ }
+
+ p2c, err := headers.getP2C()
+ if err != nil {
+ return nil, fmt.Errorf("square/go-jose: invalid P2C: %v", err)
+ }
+ if p2c <= 0 {
+ return nil, fmt.Errorf("square/go-jose: invalid P2C: must be a positive integer")
+ }
+
+ // salt is UTF8(Alg) || 0x00 || Salt Input
+ alg := headers.getAlgorithm()
+ salt := bytes.Join([][]byte{[]byte(alg), p2s.bytes()}, []byte{0x00})
+
+ // derive key
+ keyLen, h := getPbkdf2Params(alg)
+ key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
+
+ // use AES cipher with derived key
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
+ if err != nil {
+ return nil, err
+ }
+ return cek, nil
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ mac, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return Signature{}, errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ return Signature{
+ Signature: mac,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
+ expected, err := ctx.hmac(payload, alg)
+ if err != nil {
+ return errors.New("square/go-jose: failed to compute hmac")
+ }
+
+ if len(mac) != len(expected) {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ match := subtle.ConstantTimeCompare(mac, expected)
+ if match != 1 {
+ return errors.New("square/go-jose: invalid hmac")
+ }
+
+ return nil
+}
+
+// Compute the HMAC based on the given alg value
+func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
+ var hash func() hash.Hash
+
+ switch alg {
+ case HS256:
+ hash = sha256.New
+ case HS384:
+ hash = sha512.New384
+ case HS512:
+ hash = sha512.New
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ hmac := hmac.New(hash, ctx.key)
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hmac.Write(payload)
+ return hmac.Sum(nil), nil
+}