aboutsummaryrefslogtreecommitdiff
path: root/vendor/github.com/PuerkitoBio
diff options
context:
space:
mode:
authorMatthew Heon <matthew.heon@gmail.com>2017-11-01 11:24:59 -0400
committerMatthew Heon <matthew.heon@gmail.com>2017-11-01 11:24:59 -0400
commita031b83a09a8628435317a03f199cdc18b78262f (patch)
treebc017a96769ce6de33745b8b0b1304ccf38e9df0 /vendor/github.com/PuerkitoBio
parent2b74391cd5281f6fdf391ff8ad50fd1490f6bf89 (diff)
downloadpodman-a031b83a09a8628435317a03f199cdc18b78262f.tar.gz
podman-a031b83a09a8628435317a03f199cdc18b78262f.tar.bz2
podman-a031b83a09a8628435317a03f199cdc18b78262f.zip
Initial checkin from CRI-O repo
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Diffstat (limited to 'vendor/github.com/PuerkitoBio')
-rw-r--r--vendor/github.com/PuerkitoBio/purell/LICENSE12
-rw-r--r--vendor/github.com/PuerkitoBio/purell/README.md187
-rw-r--r--vendor/github.com/PuerkitoBio/purell/purell.go379
-rw-r--r--vendor/github.com/PuerkitoBio/urlesc/LICENSE27
-rw-r--r--vendor/github.com/PuerkitoBio/urlesc/README.md16
-rw-r--r--vendor/github.com/PuerkitoBio/urlesc/urlesc.go180
6 files changed, 801 insertions, 0 deletions
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE
new file mode 100644
index 000000000..4b9986dea
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012, Martin Angers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md
new file mode 100644
index 000000000..09e8a32cb
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/README.md
@@ -0,0 +1,187 @@
+# Purell
+
+Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
+
+Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
+
+[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
+
+## Install
+
+`go get github.com/PuerkitoBio/purell`
+
+## Changelog
+
+* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
+* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
+* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
+* **v0.2.0** : Add benchmarks, Attempt IDN support.
+* **v0.1.0** : Initial release.
+
+## Examples
+
+From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
+
+```go
+package purell
+
+import (
+ "fmt"
+ "net/url"
+)
+
+func ExampleNormalizeURLString() {
+ if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
+ FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
+ panic(err)
+ } else {
+ fmt.Print(normalized)
+ }
+ // Output: http://somewebsite.com:80/Amazing%3F/url/
+}
+
+func ExampleMustNormalizeURLString() {
+ normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
+ FlagsUnsafeGreedy)
+ fmt.Print(normalized)
+
+ // Output: http://somewebsite.com/Amazing%FA/url
+}
+
+func ExampleNormalizeURL() {
+ if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
+ panic(err)
+ } else {
+ normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
+ fmt.Print(normalized)
+ }
+
+ // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
+}
+```
+
+## API
+
+As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
+
+```go
+const (
+ // Safe normalizations
+ FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+ FlagLowercaseHost // http://HOST -> http://host
+ FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
+ FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
+ FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
+ FlagRemoveDefaultPort // http://host:80 -> http://host
+ FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
+
+ // Usually safe normalizations
+ FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+ FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+ FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
+
+ // Unsafe normalizations
+ FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
+ FlagRemoveFragment // http://host/path#fragment -> http://host/path
+ FlagForceHTTP // https://host -> http://host
+ FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+ FlagRemoveWWW // http://www.host/ -> http://host/
+ FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+ FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+ // Normalizations not in the wikipedia article, required to cover tests cases
+ // submitted by jehiah
+ FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
+ FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
+ FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
+ FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+ FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
+
+ // Convenience set of safe normalizations
+ FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+ // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+ // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+ // Convenience set of usually safe normalizations (includes FlagsSafe)
+ FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+ FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+ // Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+ FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+ FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+ // Convenience set of all available flags
+ FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+ FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+```
+
+For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
+
+The [full godoc reference is available on gopkgdoc][godoc].
+
+Some things to note:
+
+* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
+
+* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
+ - %24 -> $
+ - %26 -> &
+ - %2B-%3B -> +,-./0123456789:;
+ - %3D -> =
+ - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ - %5F -> _
+ - %61-%7A -> abcdefghijklmnopqrstuvwxyz
+ - %7E -> ~
+
+
+* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
+
+* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
+
+* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
+
+### Safe vs Usually Safe vs Unsafe
+
+Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
+
+Consider the following URL:
+
+`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+Normalizing with the `FlagsSafe` gives:
+
+`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+With the `FlagsUsuallySafeGreedy`:
+
+`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
+
+And with `FlagsUnsafeGreedy`:
+
+`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
+
+## TODOs
+
+* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
+
+## Thanks / Contributions
+
+@rogpeppe
+@jehiah
+@opennota
+@pchristopher1275
+@zenovich
+@beeker1121
+
+## License
+
+The [BSD 3-Clause license][bsd].
+
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[wiki]: http://en.wikipedia.org/wiki/URL_normalization
+[rfc]: http://tools.ietf.org/html/rfc3986#section-6
+[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
+[pr5]: https://github.com/PuerkitoBio/purell/pull/5
+[iss7]: https://github.com/PuerkitoBio/purell/issues/7
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go
new file mode 100644
index 000000000..645e1b76f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/purell.go
@@ -0,0 +1,379 @@
+/*
+Package purell offers URL normalization as described on the wikipedia page:
+http://en.wikipedia.org/wiki/URL_normalization
+*/
+package purell
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/PuerkitoBio/urlesc"
+ "golang.org/x/net/idna"
+ "golang.org/x/text/unicode/norm"
+ "golang.org/x/text/width"
+)
+
// A NormalizationFlags value is a bit set selecting which normalization
// steps NormalizeURL / NormalizeURLString apply to a URL.
type NormalizationFlags uint

const (
	// Safe normalizations
	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
	FlagLowercaseHost                                            // http://HOST -> http://host
	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path

	// Usually safe normalizations
	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c

	// Unsafe normalizations
	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
	FlagForceHTTP              // https://host -> http://host
	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
	FlagRemoveWWW              // http://www.host/ -> http://host/
	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3

	// Normalizations not in the wikipedia article, required to cover tests cases
	// submitted by jehiah
	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path

	// Convenience set of safe normalizations
	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator

	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".

	// Convenience set of usually safe normalizations (includes FlagsSafe)
	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments

	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery

	// Convenience set of all available flags
	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
+
// Default ports for the http and https schemes, including the leading
// colon so they can be compared directly against an rxPort match.
const (
	defaultHttpPort  = ":80"
	defaultHttpsPort = ":443"
)
+
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)                                                 // trailing :port, optionally followed by a final /
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)                    // directory index file name at the end of the path
var rxDupSlashes = regexp.MustCompile(`/{2,}`)                                               // runs of two or more slashes
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)                          // host written as a single decimal DWORD (+ optional dots/port)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) // dotted-quad host with octal components
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)                 // host written as a hexadecimal number
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)                                        // splits host proper from an optional :port suffix
var rxEmptyPort = regexp.MustCompile(`:+$`)                                                  // empty port separator(s) at the end of the host
+
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.

// Since maps have undefined traversing order, make a slice of ordered keys.
// NormalizeURL applies the selected flag functions in exactly this order.
var flagsOrder = []NormalizationFlags{
	FlagLowercaseScheme,
	FlagLowercaseHost,
	FlagRemoveDefaultPort,
	FlagRemoveDirectoryIndex,
	FlagRemoveDotSegments,
	FlagRemoveFragment,
	FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
	FlagRemoveDuplicateSlashes,
	FlagRemoveWWW,
	FlagAddWWW,
	FlagSortQuery,
	FlagDecodeDWORDHost,
	FlagDecodeOctalHost,
	FlagDecodeHexHost,
	FlagRemoveUnnecessaryHostDots,
	FlagRemoveEmptyPortSeparator,
	FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
	FlagAddTrailingSlash,
}
+
// ... and then the map, where order is unimportant.
// flags maps each normalization flag to the function that implements it.
var flags = map[NormalizationFlags]func(*url.URL){
	FlagLowercaseScheme:           lowercaseScheme,
	FlagLowercaseHost:             lowercaseHost,
	FlagRemoveDefaultPort:         removeDefaultPort,
	FlagRemoveDirectoryIndex:      removeDirectoryIndex,
	FlagRemoveDotSegments:         removeDotSegments,
	FlagRemoveFragment:            removeFragment,
	FlagForceHTTP:                 forceHTTP,
	FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
	FlagRemoveWWW:                 removeWWW,
	FlagAddWWW:                    addWWW,
	FlagSortQuery:                 sortQuery,
	FlagDecodeDWORDHost:           decodeDWORDHost,
	FlagDecodeOctalHost:           decodeOctalHost,
	FlagDecodeHexHost:             decodeHexHost,
	FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, // (sic) function name has a typo, kept as declared
	FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
	FlagRemoveTrailingSlash:       removeTrailingSlash,
	FlagAddTrailingSlash:          addTrailingSlash,
}
+
+// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
+// It takes an URL string as input, as well as the normalization flags.
+func MustNormalizeURLString(u string, f NormalizationFlags) string {
+ result, e := NormalizeURLString(u, f)
+ if e != nil {
+ panic(e)
+ }
+ return result
+}
+
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
	parsed, err := url.Parse(u)
	if err != nil {
		return "", err
	}

	// Lowercase the host before the IDN handling below so that
	// idna.ToASCII sees canonical input.
	if f&FlagLowercaseHost == FlagLowercaseHost {
		parsed.Host = strings.ToLower(parsed.Host)
	}

	// The idna package doesn't fully conform to RFC 5895
	// (https://tools.ietf.org/html/rfc5895), so we do it here.
	// Taken from Go 1.8 cycle source, courtesy of bradfitz.
	// TODO: Remove when (if?) idna package conforms to RFC 5895.
	parsed.Host = width.Fold.String(parsed.Host)
	parsed.Host = norm.NFC.String(parsed.Host)
	if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
		return "", err
	}

	return NormalizeURL(parsed, f), nil
}
+
+// NormalizeURL returns the normalized string.
+// It takes a parsed URL object as input, as well as the normalization flags.
+func NormalizeURL(u *url.URL, f NormalizationFlags) string {
+ for _, k := range flagsOrder {
+ if f&k == k {
+ flags[k](u)
+ }
+ }
+ return urlesc.Escape(u)
+}
+
+func lowercaseScheme(u *url.URL) {
+ if len(u.Scheme) > 0 {
+ u.Scheme = strings.ToLower(u.Scheme)
+ }
+}
+
+func lowercaseHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = strings.ToLower(u.Host)
+ }
+}
+
// removeDefaultPort drops the port from the host when it is the default
// port for the scheme (http://host:80 -> http://host,
// https://host:443 -> https://host). Non-default ports are kept.
func removeDefaultPort(u *url.URL) {
	if len(u.Host) > 0 {
		scheme := strings.ToLower(u.Scheme)
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			// Only strip the matched :port when it is the scheme's default.
			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
				return ""
			}
			return val
		})
	}
}
+
+func removeTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if strings.HasSuffix(u.Path, "/") {
+ u.Path = u.Path[:l-1]
+ }
+ } else if l = len(u.Host); l > 0 {
+ if strings.HasSuffix(u.Host, "/") {
+ u.Host = u.Host[:l-1]
+ }
+ }
+}
+
+func addTrailingSlash(u *url.URL) {
+ if l := len(u.Path); l > 0 {
+ if !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ } else if l = len(u.Host); l > 0 {
+ if !strings.HasSuffix(u.Host, "/") {
+ u.Host += "/"
+ }
+ }
+}
+
+func removeDotSegments(u *url.URL) {
+ if len(u.Path) > 0 {
+ var dotFree []string
+ var lastIsDot bool
+
+ sections := strings.Split(u.Path, "/")
+ for _, s := range sections {
+ if s == ".." {
+ if len(dotFree) > 0 {
+ dotFree = dotFree[:len(dotFree)-1]
+ }
+ } else if s != "." {
+ dotFree = append(dotFree, s)
+ }
+ lastIsDot = (s == "." || s == "..")
+ }
+ // Special case if host does not end with / and new path does not begin with /
+ u.Path = strings.Join(dotFree, "/")
+ if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
+ u.Path = "/" + u.Path
+ }
+ // Special case if the last segment was a dot, make sure the path ends with a slash
+ if lastIsDot && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ }
+ }
+}
+
// removeDirectoryIndex strips a trailing directory-index file name such
// as index.html or default.htm from the path
// (http://host/path/index.html -> http://host/path/).
func removeDirectoryIndex(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
	}
}
+
+func removeFragment(u *url.URL) {
+ u.Fragment = ""
+}
+
+func forceHTTP(u *url.URL) {
+ if strings.ToLower(u.Scheme) == "https" {
+ u.Scheme = "http"
+ }
+}
+
// removeDuplicateSlashes collapses runs of slashes in the path
// (http://host/path//a///b -> http://host/path/a/b).
func removeDuplicateSlashes(u *url.URL) {
	if len(u.Path) > 0 {
		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
	}
}
+
+func removeWWW(u *url.URL) {
+ if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = u.Host[4:]
+ }
+}
+
+func addWWW(u *url.URL) {
+ if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+ u.Host = "www." + u.Host
+ }
+}
+
+func sortQuery(u *url.URL) {
+ q := u.Query()
+
+ if len(q) > 0 {
+ arKeys := make([]string, len(q))
+ i := 0
+ for k, _ := range q {
+ arKeys[i] = k
+ i++
+ }
+ sort.Strings(arKeys)
+ buf := new(bytes.Buffer)
+ for _, k := range arKeys {
+ sort.Strings(q[k])
+ for _, v := range q[k] {
+ if buf.Len() > 0 {
+ buf.WriteRune('&')
+ }
+ buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
+ }
+ }
+
+ // Rebuild the raw query string
+ u.RawQuery = buf.String()
+ }
+}
+
// decodeDWORDHost rewrites a host written as a single decimal DWORD into
// dotted-quad form (http://1113982867 -> http://66.102.7.147). Any
// trailing dots/port captured by the regex are preserved.
func decodeDWORDHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			var parts [4]int64

			// Split the DWORD into its four bytes, most significant first.
			dword, _ := strconv.ParseInt(matches[1], 10, 0)
			for i, shift := range []uint{24, 16, 8, 0} {
				parts[i] = dword >> shift & 0xFF
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
		}
	}
}
+
// decodeOctalHost rewrites a dotted-quad host whose components are octal
// numbers into decimal form (http://0102.0146.07.0223 -> http://66.102.7.147).
// Any trailing dots/port captured by the regex are preserved.
func decodeOctalHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
			var parts [4]int64

			// matches[1..4] are the four octal components; base-8 parse each.
			for i := 1; i <= 4; i++ {
				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
			}
			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
		}
	}
}
+
// decodeHexHost rewrites a host written as a hexadecimal number into
// dotted-quad form (http://0x42660793 -> http://66.102.7.147) by first
// converting it to a decimal DWORD and then reusing decodeDWORDHost.
func decodeHexHost(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
			// Conversion is safe because of regex validation
			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
			// Set host as DWORD (base 10) encoded host
			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
			// The rest is the same as decoding a DWORD host
			decodeDWORDHost(u)
		}
	}
}
+
// removeUnncessaryHostDots trims leading and trailing dots from the host
// (http://.host../path -> http://host/path), keeping any :port suffix.
// NOTE(review): the name is missing an "e" ("Unncessary"); kept as-is
// because the flags map references it by this exact name.
func removeUnncessaryHostDots(u *url.URL) {
	if len(u.Host) > 0 {
		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
			// Trim the leading and trailing dots
			u.Host = strings.Trim(matches[1], ".")
			// Re-append the port, if one was captured.
			if len(matches) > 2 {
				u.Host += matches[2]
			}
		}
	}
}
+
// removeEmptyPortSeparator drops a colon with no port after it from the
// host (http://host:/path -> http://host/path).
func removeEmptyPortSeparator(u *url.URL) {
	if len(u.Host) > 0 {
		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
	}
}
diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md
new file mode 100644
index 000000000..bebe305e0
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/README.md
@@ -0,0 +1,16 @@
+urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
+======
+
+Package urlesc implements query escaping as per RFC 3986.
+
+It contains some parts of the net/url package, modified so as to allow
+some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
+
+## Install
+
+ go get github.com/PuerkitoBio/urlesc
+
+## License
+
+Go license (BSD-3-Clause)
+
diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
new file mode 100644
index 000000000..1b8462459
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package urlesc implements query escaping as per RFC 3986.
+// It contains some parts of the net/url package, modified so as to allow
+// some reserved characters incorrectly escaped by net/url.
+// See https://github.com/golang/go/issues/5684
+package urlesc
+
+import (
+ "bytes"
+ "net/url"
+ "strings"
+)
+
// encoding identifies which part of a URL a string is destined for, so
// that shouldEscape/escape can apply the rules of the matching RFC 3986
// section.
type encoding int

const (
	encodePath           encoding = 1 + iota // path component (§3.3)
	encodeUserPassword                       // userinfo component (§3.2.1)
	encodeQueryComponent                     // query component (§3.4)
	encodeFragment                           // fragment (§4.1)
)
+
// shouldEscape reports whether the byte c must be percent-escaped when
// it appears in the part of a URL identified by mode, according to
// RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
	// §2.3 Unreserved characters (alphanum)
	if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
		return false
	}

	switch c {
	case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
		return false

	// §2.2 Reserved characters (reserved)
	case ':', '/', '?', '#', '[', ']', '@', // gen-delims
		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
		// Different sections of the URL allow a few of
		// the reserved characters to appear unescaped.
		switch mode {
		case encodePath: // §3.3
			// The RFC allows sub-delims and : @.
			// '/', '[' and ']' can be used to assign meaning to individual path
			// segments. This package only manipulates the path as a whole,
			// so we allow those as well. That leaves only ? and # to escape.
			return c == '?' || c == '#'

		case encodeUserPassword: // §3.2.1
			// The RFC allows : and sub-delims in
			// userinfo. The parsing of userinfo treats ':' as special so we must escape
			// all the gen-delims.
			return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'

		case encodeQueryComponent: // §3.4
			// The RFC allows / and ?.
			return c != '/' && c != '?'

		case encodeFragment: // §4.1
			// The RFC text is silent but the grammar allows
			// everything, so escape nothing but #
			return c == '#'
		}
	}

	// Everything else must be escaped.
	return true
}
+
// QueryEscape escapes the string so it can be safely placed
// inside a URL query, following RFC 3986 (spaces become '+').
func QueryEscape(s string) string {
	return escape(s, encodeQueryComponent)
}
+
// escape percent-encodes the bytes of s that shouldEscape reports as
// unsafe for the given mode. In query components, spaces are written as
// '+' instead of %20.
func escape(s string, mode encoding) string {
	// First pass: count how many bytes need substitution, to size the
	// output buffer exactly.
	spaceCount, hexCount := 0, 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if shouldEscape(c, mode) {
			if c == ' ' && mode == encodeQueryComponent {
				spaceCount++
			} else {
				hexCount++
			}
		}
	}

	// Nothing to escape: return the input unchanged (no allocation).
	if spaceCount == 0 && hexCount == 0 {
		return s
	}

	// Second pass: each %XX escape adds two bytes; a space->'+'
	// substitution is length-neutral.
	t := make([]byte, len(s)+2*hexCount)
	j := 0
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == ' ' && mode == encodeQueryComponent:
			t[j] = '+'
			j++
		case shouldEscape(c, mode):
			t[j] = '%'
			t[j+1] = "0123456789ABCDEF"[c>>4]
			t[j+2] = "0123456789ABCDEF"[c&15]
			j += 3
		default:
			t[j] = s[i]
			j++
		}
	}
	return string(t)
}
+
// uiReplacer undoes the escaping of sub-delim characters that RFC 3986
// permits to appear unescaped in the userinfo component.
var uiReplacer = strings.NewReplacer(
	"%21", "!",
	"%27", "'",
	"%28", "(",
	"%29", ")",
	"%2A", "*",
)

// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
func unescapeUserinfo(s string) string {
	return uiReplacer.Replace(s)
}
+
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
//	scheme:opaque
//	scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
//   - if u.Scheme is empty, scheme: is omitted.
//   - if u.User is nil, userinfo@ is omitted.
//   - if u.Host is empty, host/ is omitted.
//   - if u.Scheme and u.Host are empty and u.User is nil,
//     the entire scheme://userinfo@host/ is omitted.
//   - if u.Host is non-empty and u.Path begins with a /,
//     the form host/path does not add its own /.
//   - if u.RawQuery is empty, ?query is omitted.
//   - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
	var buf bytes.Buffer
	if u.Scheme != "" {
		buf.WriteString(u.Scheme)
		buf.WriteByte(':')
	}
	if u.Opaque != "" {
		buf.WriteString(u.Opaque)
	} else {
		if u.Scheme != "" || u.Host != "" || u.User != nil {
			buf.WriteString("//")
			if ui := u.User; ui != nil {
				// Userinfo is escaped by ui.String(); undo the escapes
				// RFC 3986 does not require.
				buf.WriteString(unescapeUserinfo(ui.String()))
				buf.WriteByte('@')
			}
			if h := u.Host; h != "" {
				buf.WriteString(h)
			}
		}
		// Separate host and a relative path with a slash.
		if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
			buf.WriteByte('/')
		}
		buf.WriteString(escape(u.Path, encodePath))
	}
	// RawQuery is emitted as-is; only path and fragment are re-escaped.
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	if u.Fragment != "" {
		buf.WriteByte('#')
		buf.WriteString(escape(u.Fragment, encodeFragment))
	}
	return buf.String()
}