Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/mailru/easyjson/LICENSE                       |    7
-rw-r--r--  vendor/github.com/mailru/easyjson/README.md                     |  333
-rw-r--r--  vendor/github.com/mailru/easyjson/buffer/pool.go                |  270
-rw-r--r--  vendor/github.com/mailru/easyjson/helpers.go                    |   78
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/bytestostr.go          |   24
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go |   13
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/error.go               |   15
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/lexer.go               | 1181
-rw-r--r--  vendor/github.com/mailru/easyjson/jwriter/writer.go             |  390
-rw-r--r--  vendor/github.com/mailru/easyjson/raw.go                        |   45
10 files changed, 0 insertions, 2356 deletions
diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE
deleted file mode 100644
index fbff658f7..000000000
--- a/vendor/github.com/mailru/easyjson/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright (c) 2016 Mail.Ru Group
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md
deleted file mode 100644
index 7fd768654..000000000
--- a/vendor/github.com/mailru/easyjson/README.md
+++ /dev/null
@@ -1,333 +0,0 @@
-# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson)
-
-Package easyjson provides a fast and easy way to marshal/unmarshal Go structs
-to/from JSON without the use of reflection. In performance tests, easyjson
-outperforms the standard `encoding/json` package by 4-5x, and other JSON
-encoding packages by 2-3x.
-
-easyjson aims to keep generated Go code simple enough so that it can be easily
-optimized or fixed. Another goal is to provide users with the ability to
-customize the generated code by providing options not available with the
-standard `encoding/json` package, such as generating "snake_case" names or
-enabling `omitempty` behavior by default.
-
-## Usage
-```sh
-# install
-go get -u github.com/mailru/easyjson/...
-
-# run
-easyjson -all <file>.go
-```
-
-The above will generate `<file>_easyjson.go` containing the appropriate marshaler and
-unmarshaler funcs for all structs contained in `<file>.go`.
-
-Please note that easyjson requires a full Go build environment and the `GOPATH`
-environment variable to be set. This is because easyjson code generation
-invokes `go run` on a temporary file (an approach to code generation borrowed
-from [ffjson](https://github.com/pquerna/ffjson)).
-
-## Options
-```txt
-Usage of easyjson:
- -all
- generate marshaler/unmarshalers for all structs in a file
- -build_tags string
- build tags to add to generated file
- -leave_temps
- do not delete temporary files
- -no_std_marshalers
- don't generate MarshalJSON/UnmarshalJSON funcs
- -noformat
- do not run 'gofmt -w' on output file
- -omit_empty
- omit empty fields by default
- -output_filename string
- specify the filename of the output
- -pkg
- process the whole package instead of just the given file
- -snake_case
- use snake_case names instead of CamelCase by default
- -lower_camel_case
- use lowerCamelCase instead of CamelCase by default
- -stubs
- only generate stubs for marshaler/unmarshaler funcs
- -disallow_unknown_fields
- return error if some unknown field in json appeared
-```
-
-Using `-all` will generate marshalers/unmarshalers for all Go structs in the
-file. If `-all` is not provided, then only those structs whose preceding
-comment starts with `easyjson:json` will have marshalers/unmarshalers
-generated. For example:
-
-```go
-//easyjson:json
-type A struct {}
-```
-
-Additional option notes:
-
-* `-snake_case` tells easyjson to generate snake\_case field names by default
- (unless overridden by a field tag). The CamelCase to snake\_case conversion
- algorithm should work in most cases (i.e., HTTPVersion will be converted to
- "http_version").
-
-* `-build_tags` will add the specified build tags to generated Go sources.
-
-## Generated Marshaler/Unmarshaler Funcs
-
-For Go struct types, easyjson generates the funcs `MarshalEasyJSON` /
-`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisfy
-the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in
-conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary
-reflection / type assertions during marshaling/unmarshaling to/from JSON for Go
-structs.
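-
-For example, a minimal usage sketch (the `User` struct and `example` func are
-hypothetical, and it is assumed `easyjson` has already been run on the file so
-that the generated methods exist):
-
-```go
-import "github.com/mailru/easyjson"
-
-//easyjson:json
-type User struct {
-	Name string `json:"name"`
-}
-
-func example() error {
-	u := User{Name: "gopher"}
-	// Uses the generated MarshalEasyJSON; no reflection is involved.
-	data, err := easyjson.Marshal(&u)
-	if err != nil {
-		return err
-	}
-	var decoded User
-	// Uses the generated UnmarshalEasyJSON.
-	return easyjson.Unmarshal(data, &decoded)
-}
-```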
-
-easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct
-types compatible with the standard `json.Marshaler` and `json.Unmarshaler`
-interfaces. Please be aware that using the standard `json.Marshal` /
-`json.Unmarshal` for marshaling/unmarshaling will incur a significant
-performance penalty when compared to using `easyjson.Marshal` /
-`easyjson.Unmarshal`.
-
-Additionally, easyjson exposes utility funcs that use `MarshalEasyJSON` and
-`UnmarshalEasyJSON` to marshal/unmarshal to and from standard readers and
-writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter`,
-which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc
-listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of
-utility funcs that are available.
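-
-For instance, a hedged sketch of an HTTP handler built on this helper
-(reusing the hypothetical `User` type from above):
-
-```go
-import (
-	"net/http"
-
-	"github.com/mailru/easyjson"
-)
-
-func userHandler(w http.ResponseWriter, r *http.Request) {
-	u := User{Name: "gopher"}
-	// Sets Content-Type and Content-Length, then writes the JSON body.
-	// started is false only if nothing was written yet, so an HTTP 500
-	// reply is still possible in that case.
-	started, _, err := easyjson.MarshalToHTTPResponseWriter(&u, w)
-	if err != nil && !started {
-		http.Error(w, "encoding failed", http.StatusInternalServerError)
-	}
-}
-```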
-
-## Controlling easyjson Marshaling and Unmarshaling Behavior
-
-Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs
-that satisfy the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces.
-These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined
-for a Go type.
-
-Go types can also satisfy the `easyjson.Optional` interface, which allows the
-type to define its own `omitempty` logic.
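-
-For example, a minimal sketch of a type that supplies its own `omitempty`
-rule (the `Timestamp` type is hypothetical):
-
-```go
-// Timestamp treats a zero Unix time as "empty".
-type Timestamp struct {
-	Unix int64
-}
-
-// IsDefined satisfies easyjson.Optional; when it returns false, fields of
-// this type are omitted under omitempty.
-func (t Timestamp) IsDefined() bool {
-	return t.Unix != 0
-}
-```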
-
-## Type Wrappers
-
-easyjson provides additional type wrappers defined in the `easyjson/opt`
-package. These wrap the standard Go primitives and in turn satisfy the
-easyjson interfaces.
-
-The `easyjson/opt` type wrappers are useful when you need to distinguish a
-missing value from a zero value, or to specify a default value. Type
-wrappers allow easyjson to avoid additional pointers and heap allocations and
-can significantly increase performance when used properly.
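-
-A hedged sketch, assuming the `opt` wrappers expose `V`/`Defined` fields as
-in `opt.Int` (the `Config` type is hypothetical; check the package for the
-exact API):
-
-```go
-import "github.com/mailru/easyjson/opt"
-
-//easyjson:json
-type Config struct {
-	// Distinguishes "retries": 0 from the key being absent, without
-	// resorting to *int and the heap allocation it implies.
-	Retries opt.Int `json:"retries"`
-}
-
-func retriesOrDefault(c Config) int {
-	if c.Retries.Defined {
-		return c.Retries.V
-	}
-	return 3 // key was missing: fall back to a default
-}
-```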
-
-## Memory Pooling
-
-easyjson uses a buffer pool that allocates data in increasing chunks from 128
-to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of
-`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory
-allocation and to allow larger reusable buffers.
-
-easyjson's custom allocation buffer pool is defined in the `easyjson/buffer`
-package, and the default pool behavior can be modified (if necessary)
-through a call to `buffer.Init()` prior to any marshaling or unmarshaling.
-Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer)
-for more information.
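-
-A minimal sketch of overriding the defaults (the values are illustrative; the
-`PoolConfig` fields can be seen in the vendored `easyjson/buffer` source
-below):
-
-```go
-import "github.com/mailru/easyjson/buffer"
-
-func init() {
-	// Must run before any marshaling or unmarshaling takes place.
-	buffer.Init(buffer.PoolConfig{
-		StartSize:  128,   // minimum chunk size that is allocated
-		PooledSize: 1024,  // minimum chunk size that is pooled for reuse
-		MaxSize:    65536, // maximum chunk size that is allocated
-	})
-}
-```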
-
-## Issues, Notes, and Limitations
-
-* easyjson is still early in its development. As such, there are likely to be
- bugs and missing features when compared to `encoding/json`. In the case of a
- missing feature or bug, please create a GitHub issue. Pull requests are
- welcome!
-
-* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive
- matching is not currently provided due to the significant performance hit
- when doing case-insensitive key matching. In the future, case-insensitive
- object key matching may be provided via an option to the generator.
-
-* easyjson makes use of `unsafe`, which simplifies the code and
-  provides significant performance benefits by allowing no-copy
-  conversion from `[]byte` to `string`. That said, `unsafe` is used
-  only when unmarshaling and parsing JSON, and any memory allocated
-  through `unsafe` operations is safely released by easyjson. Set the
-  build tag `easyjson_nounsafe` to compile without `unsafe` (see the
-  example build command after this list).
-
-* easyjson is compatible with Google App Engine. The `appengine` build
- tag (set by App Engine's environment) will automatically disable the
- use of `unsafe`, which is not allowed in App Engine's Standard
- Environment. Note that the use with App Engine is still experimental.
-
-* Floats are formatted using the default precision from Go's `strconv` package.
-  As such, easyjson will not correctly handle high-precision floats when
-  marshaling/unmarshaling JSON. Note, however, that this behavior is sufficient
-  for the vast majority of uses. That said, a different package may be needed
-  if precise marshaling/unmarshaling of high-precision floats to/from JSON is
-  required.
-
-* While unmarshaling, the JSON parser does the minimal amount of work needed to
-  skip over matching brackets, and as such full validation is not done for the
-  entire JSON value being unmarshaled/parsed.
-
-* Currently there is no true streaming support for encoding/decoding, as for
-  many uses/protocols the final, marshaled length of the JSON needs to be
-  known prior to sending the data; this is not currently possible with
-  easyjson's architecture.
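-
-For reference, a sketch of building with `unsafe` disabled via the build tag
-mentioned above:
-
-```sh
-go build -tags easyjson_nounsafe ./...
-```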
-
-## Benchmarks
-
-Most benchmarks were done using the
-[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets)
-(9kB after eliminating whitespace). This example is similar to real-world data,
-is well-structured, and contains a healthy variety of different types, making
-it ideal for JSON serialization benchmarks.
-
-Note:
-
-* For small request benchmarks, an 80-byte portion of the above example was
- used.
-
-* For large request marshaling benchmarks, a struct containing 50 regular
- samples was used, making a ~500kB output JSON.
-
-* Benchmarks show the results of easyjson's default behavior,
-  which makes use of `unsafe`.
-
-Benchmarks are available in the repository and can be run by invoking `make`.
-
-### easyjson vs. encoding/json
-
-easyjson is roughly 5-6 times faster than the standard `encoding/json` for
-unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent
-marshaling is 6-7x faster if marshaling to a writer.
-
-### easyjson vs. ffjson
-
-easyjson uses the same approach for JSON marshaling as
-[ffjson](https://github.com/pquerna/ffjson), but takes a significantly
-different approach to lexing and parsing JSON during unmarshaling. This means
-easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for
-non-concurrent marshaling.
-
-As of this writing, `ffjson` seems to have issues when used concurrently:
-specifically, large request pooling hurts `ffjson`'s performance and causes
-scalability issues. These issues with `ffjson` can likely be fixed, but as of
-this writing they remain known, outstanding issues with `ffjson`.
-
-easyjson and `ffjson` have similar performance for small requests; however,
-easyjson outperforms `ffjson` by roughly 2-5x for large requests when used
-with a writer.
-
-### easyjson vs. go/codec
-
-[go/codec](https://github.com/ugorji/go) provides
-compile-time helpers for JSON generation. In this case, the helpers do not
-work like marshalers, as they are encoding-independent.
-
-easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks
-and about 3x faster for concurrent encoding (without marshaling to a writer).
-
-In an attempt to measure the marshaling performance of `go/codec` itself (as
-opposed to allocations/memcpy/writer interface invocations), a benchmark was
-done that reset the length of the output byte slice rather than setting the
-whole slice to nil. However, this optimization may not be applicable in
-practice in this exact form, since the memory is not freed between marshaling
-operations.
-
-### easyjson vs. 'ujson' Python module
-
-[ujson](https://github.com/esnme/ultrajson) uses C code for parsing, so it is
-interesting to see how plain Go compares to that. It is important to note that
-the resulting object is slower to access in Python, since the library parses
-JSON objects into dictionaries.
-
-easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for
-marshaling.
-
-### Benchmark Results
-
-`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6.
-`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6.
-
-#### Unmarshaling
-
-| lib | json size | MB/s | allocs/op | B/op |
-|:---------|:----------|-----:|----------:|------:|
-| standard | regular | 22 | 218 | 10229 |
-| standard | small | 9.7 | 14 | 720 |
-| | | | | |
-| easyjson | regular | 125 | 128 | 9794 |
-| easyjson | small | 67 | 3 | 128 |
-| | | | | |
-| ffjson | regular | 66 | 141 | 9985 |
-| ffjson | small | 17.6 | 10 | 488 |
-| | | | | |
-| codec | regular | 55 | 434 | 19299 |
-| codec | small | 29 | 7 | 336 |
-| | | | | |
-| ujson | regular | 103 | N/A | N/A |
-
-#### Marshaling, one goroutine.
-
-| lib | json size | MB/s | allocs/op | B/op |
-|:----------|:----------|-----:|----------:|------:|
-| standard | regular | 75 | 9 | 23256 |
-| standard | small | 32 | 3 | 328 |
-| standard | large | 80 | 17 | 1.2M |
-| | | | | |
-| easyjson | regular | 213 | 9 | 10260 |
-| easyjson* | regular | 263 | 8 | 742 |
-| easyjson | small | 125 | 1 | 128 |
-| easyjson | large | 212 | 33 | 490k |
-| easyjson* | large | 262 | 25 | 2879 |
-| | | | | |
-| ffjson | regular | 122 | 153 | 21340 |
-| ffjson** | regular | 146 | 152 | 4897 |
-| ffjson | small | 36 | 5 | 384 |
-| ffjson** | small | 64 | 4 | 128 |
-| ffjson | large | 134 | 7317 | 818k |
-| ffjson** | large | 125 | 7320 | 827k |
-| | | | | |
-| codec | regular | 80 | 17 | 33601 |
-| codec*** | regular | 108 | 9 | 1153 |
-| codec | small | 42 | 3 | 304 |
-| codec*** | small | 56 | 1 | 48 |
-| codec | large | 73 | 483 | 2.5M |
-| codec*** | large | 103 | 451 | 66007 |
-| | | | | |
-| ujson | regular | 92 | N/A | N/A |
-
-\* marshaling to a writer,
-\*\* using `ffjson.Pool()`,
-\*\*\* reusing output slice instead of resetting it to nil
-
-#### Marshaling, concurrent.
-
-| lib | json size | MB/s | allocs/op | B/op |
-|:----------|:----------|-----:|----------:|------:|
-| standard | regular | 252 | 9 | 23257 |
-| standard | small | 124 | 3 | 328 |
-| standard | large | 289 | 17 | 1.2M |
-| | | | | |
-| easyjson | regular | 792 | 9 | 10597 |
-| easyjson* | regular | 1748 | 8 | 779 |
-| easyjson | small | 333 | 1 | 128 |
-| easyjson | large | 718 | 36 | 548k |
-| easyjson* | large | 2134 | 25 | 4957 |
-| | | | | |
-| ffjson | regular | 301 | 153 | 21629 |
-| ffjson** | regular | 707 | 152 | 5148 |
-| ffjson | small | 62 | 5 | 384 |
-| ffjson** | small | 282 | 4 | 128 |
-| ffjson | large | 438 | 7330 | 1.0M |
-| ffjson** | large | 131 | 7319 | 820k |
-| | | | | |
-| codec | regular | 183 | 17 | 33603 |
-| codec*** | regular | 671 | 9 | 1157 |
-| codec | small | 147 | 3 | 304 |
-| codec*** | small | 299 | 1 | 48 |
-| codec | large | 190 | 483 | 2.5M |
-| codec*** | large | 752 | 451 | 77574 |
-
-\* marshaling to a writer,
-\*\* using `ffjson.Pool()`,
-\*\*\* reusing output slice instead of resetting it to nil
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
deleted file mode 100644
index 07fb4bc1f..000000000
--- a/vendor/github.com/mailru/easyjson/buffer/pool.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
-// reduce copying and to allow reuse of individual chunks.
-package buffer
-
-import (
- "io"
- "sync"
-)
-
-// PoolConfig contains configuration for the allocation and reuse strategy.
-type PoolConfig struct {
- StartSize int // Minimum chunk size that is allocated.
- PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small results in overhead.
- MaxSize int // Maximum chunk size that will be allocated.
-}
-
-var config = PoolConfig{
- StartSize: 128,
- PooledSize: 512,
- MaxSize: 32768,
-}
-
-// Reuse pool: chunk size -> pool.
-var buffers = map[int]*sync.Pool{}
-
-func initBuffers() {
- for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
- buffers[l] = new(sync.Pool)
- }
-}
-
-func init() {
- initBuffers()
-}
-
-// Init sets up a non-default pooling and allocation strategy. It should be run before any serialization is done.
-func Init(cfg PoolConfig) {
- config = cfg
- initBuffers()
-}
-
-// putBuf returns a chunk to the reuse pool if it is large enough to be reused.
-func putBuf(buf []byte) {
- size := cap(buf)
- if size < config.PooledSize {
- return
- }
- if c := buffers[size]; c != nil {
- c.Put(buf[:0])
- }
-}
-
-// getBuf gets a chunk from the reuse pool or creates a new one if reuse failed.
-func getBuf(size int) []byte {
- if size < config.PooledSize {
- return make([]byte, 0, size)
- }
-
- if c := buffers[size]; c != nil {
- v := c.Get()
- if v != nil {
- return v.([]byte)
- }
- }
- return make([]byte, 0, size)
-}
-
-// Buffer is a buffer optimized for serialization without extra copying.
-type Buffer struct {
-
- // Buf is the current chunk that can be used for serialization.
- Buf []byte
-
- toPool []byte
- bufs [][]byte
-}
-
-// EnsureSpace makes sure that the current chunk contains at least s free bytes,
-// possibly creating a new chunk.
-func (b *Buffer) EnsureSpace(s int) {
- if cap(b.Buf)-len(b.Buf) >= s {
- return
- }
- l := len(b.Buf)
- if l > 0 {
- if cap(b.toPool) != cap(b.Buf) {
- // Chunk was reallocated, toPool can be pooled.
- putBuf(b.toPool)
- }
- if cap(b.bufs) == 0 {
- b.bufs = make([][]byte, 0, 8)
- }
- b.bufs = append(b.bufs, b.Buf)
- l = cap(b.toPool) * 2
- } else {
- l = config.StartSize
- }
-
- if l > config.MaxSize {
- l = config.MaxSize
- }
- b.Buf = getBuf(l)
- b.toPool = b.Buf
-}
-
-// AppendByte appends a single byte to buffer.
-func (b *Buffer) AppendByte(data byte) {
- if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
- b.EnsureSpace(1)
- }
- b.Buf = append(b.Buf, data)
-}
-
-// AppendBytes appends a byte slice to buffer.
-func (b *Buffer) AppendBytes(data []byte) {
- for len(data) > 0 {
- if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
- b.EnsureSpace(1)
- }
-
- sz := cap(b.Buf) - len(b.Buf)
- if sz > len(data) {
- sz = len(data)
- }
-
- b.Buf = append(b.Buf, data[:sz]...)
- data = data[sz:]
- }
-}
-
-// AppendString appends a string to buffer.
-func (b *Buffer) AppendString(data string) {
- for len(data) > 0 {
- if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
- b.EnsureSpace(1)
- }
-
- sz := cap(b.Buf) - len(b.Buf)
- if sz > len(data) {
- sz = len(data)
- }
-
- b.Buf = append(b.Buf, data[:sz]...)
- data = data[sz:]
- }
-}
-
-// Size computes the size of a buffer by adding sizes of every chunk.
-func (b *Buffer) Size() int {
- size := len(b.Buf)
- for _, buf := range b.bufs {
- size += len(buf)
- }
- return size
-}
-
-// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
-func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
- var n int
- for _, buf := range b.bufs {
- if err == nil {
- n, err = w.Write(buf)
- written += n
- }
- putBuf(buf)
- }
-
- if err == nil {
- n, err = w.Write(b.Buf)
- written += n
- }
- putBuf(b.toPool)
-
- b.bufs = nil
- b.Buf = nil
- b.toPool = nil
-
- return
-}
-
-// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
-// copied if it does not fit in a single chunk. You can optionally provide one byte
-// slice as an argument that it will try to reuse.
-func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
- if len(b.bufs) == 0 {
- ret := b.Buf
- b.toPool = nil
- b.Buf = nil
- return ret
- }
-
- var ret []byte
- size := b.Size()
-
- // If we got a buffer as argument and it is big enough, reuse it.
- if len(reuse) == 1 && cap(reuse[0]) >= size {
- ret = reuse[0][:0]
- } else {
- ret = make([]byte, 0, size)
- }
- for _, buf := range b.bufs {
- ret = append(ret, buf...)
- putBuf(buf)
- }
-
- ret = append(ret, b.Buf...)
- putBuf(b.toPool)
-
- b.bufs = nil
- b.toPool = nil
- b.Buf = nil
-
- return ret
-}
-
-type readCloser struct {
- offset int
- bufs [][]byte
-}
-
-func (r *readCloser) Read(p []byte) (n int, err error) {
- for _, buf := range r.bufs {
- // Copy as much as we can.
- x := copy(p[n:], buf[r.offset:])
- n += x // Increment how much we filled.
-
- // Did we empty the whole buffer?
- if r.offset+x == len(buf) {
- // On to the next buffer.
- r.offset = 0
- r.bufs = r.bufs[1:]
-
- // We can release this buffer.
- putBuf(buf)
- } else {
- r.offset += x
- }
-
- if n == len(p) {
- break
- }
- }
- // No buffers left or nothing read?
- if len(r.bufs) == 0 {
- err = io.EOF
- }
- return
-}
-
-func (r *readCloser) Close() error {
- // Release all remaining buffers.
- for _, buf := range r.bufs {
- putBuf(buf)
- }
- // In case Close gets called multiple times.
- r.bufs = nil
-
- return nil
-}
-
-// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
-func (b *Buffer) ReadCloser() io.ReadCloser {
- ret := &readCloser{0, append(b.bufs, b.Buf)}
-
- b.bufs = nil
- b.toPool = nil
- b.Buf = nil
-
- return ret
-}
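-
-// usageSketch is an illustrative documentation sketch (not part of the
-// original package API) showing the intended append-then-drain flow.
-func usageSketch(w io.Writer) (int, error) {
-	var b Buffer
-	b.AppendString(`{"ok":`)
-	b.AppendString(`true}`)
-	// DumpTo writes every chunk to w and returns the chunks to the pool.
-	return b.DumpTo(w)
-}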
diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go
deleted file mode 100644
index b86b87d22..000000000
--- a/vendor/github.com/mailru/easyjson/helpers.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
-package easyjson
-
-import (
- "io"
- "io/ioutil"
- "net/http"
- "strconv"
-
- "github.com/mailru/easyjson/jlexer"
- "github.com/mailru/easyjson/jwriter"
-)
-
-// Marshaler is an easyjson-compatible marshaler interface.
-type Marshaler interface {
- MarshalEasyJSON(w *jwriter.Writer)
-}
-
-// Unmarshaler is an easyjson-compatible unmarshaler interface.
-type Unmarshaler interface {
- UnmarshalEasyJSON(l *jlexer.Lexer)
-}
-
-// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
-type Optional interface {
- IsDefined() bool
-}
-
-// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied
-// from a chain of smaller chunks.
-func Marshal(v Marshaler) ([]byte, error) {
- w := jwriter.Writer{}
- v.MarshalEasyJSON(&w)
- return w.BuildBytes()
-}
-
-// MarshalToWriter marshals the data to an io.Writer.
-func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {
- jw := jwriter.Writer{}
- v.MarshalEasyJSON(&jw)
- return jw.DumpTo(w)
-}
-
-// MarshalToHTTPResponseWriter sets the Content-Length and Content-Type headers for the
-// http.ResponseWriter, and sends the data to the writer. started will be equal to
-// false if an error occurred before any http.ResponseWriter methods were actually
-// invoked (in this case a 500 reply is possible).
-func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) {
- jw := jwriter.Writer{}
- v.MarshalEasyJSON(&jw)
- if jw.Error != nil {
- return false, 0, jw.Error
- }
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Content-Length", strconv.Itoa(jw.Size()))
-
- started = true
- written, err = jw.DumpTo(w)
- return
-}
-
-// Unmarshal decodes the JSON in data into the object.
-func Unmarshal(data []byte, v Unmarshaler) error {
- l := jlexer.Lexer{Data: data}
- v.UnmarshalEasyJSON(&l)
- return l.Error()
-}
-
-// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object.
-func UnmarshalFromReader(r io.Reader, v Unmarshaler) error {
- data, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
- l := jlexer.Lexer{Data: data}
- v.UnmarshalEasyJSON(&l)
- return l.Error()
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
deleted file mode 100644
index ff7b27c5b..000000000
--- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// This file is only included in the build if neither the easyjson_nounsafe
-// nor the appengine build tag is set. See the README notes for more details.
-
-//+build !easyjson_nounsafe
-//+build !appengine
-
-package jlexer
-
-import (
- "reflect"
- "unsafe"
-)
-
-// bytesToStr creates a string pointing at the slice to avoid copying.
-//
-// Warning: the string returned by the function should be used with care, as the whole input data
-// chunk may be kept from being freed by the GC because of a single string, or the buffer.Data
-// may be garbage-collected even while the string still exists.
-func bytesToStr(data []byte) string {
- h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
- shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
- return *(*string)(unsafe.Pointer(&shdr))
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
deleted file mode 100644
index 864d1be67..000000000
--- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// This file is included in the build if any of the build tags below
-// are defined. Refer to the README notes for more details.
-
-//+build easyjson_nounsafe appengine
-
-package jlexer
-
-// bytesToStr creates a string from []byte in the usual way, by copying.
-//
-// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
-func bytesToStr(data []byte) string {
- return string(data)
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go
deleted file mode 100644
index e90ec40d0..000000000
--- a/vendor/github.com/mailru/easyjson/jlexer/error.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package jlexer
-
-import "fmt"
-
-// LexerError implements the error interface and represents all possible errors that can be
-// generated while parsing JSON data.
-type LexerError struct {
- Reason string
- Offset int
- Data string
-}
-
-func (l *LexerError) Error() string {
- return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
deleted file mode 100644
index 51f056615..000000000
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ /dev/null
@@ -1,1181 +0,0 @@
-// Package jlexer contains a JSON lexer implementation.
-//
-// It is expected that it is mostly used with generated parser code, so the interface is tuned
-// for a parser that knows what kind of data is expected.
-package jlexer
-
-import (
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strconv"
- "unicode"
- "unicode/utf16"
- "unicode/utf8"
-)
-
-// tokenKind determines type of a token.
-type tokenKind byte
-
-const (
- tokenUndef tokenKind = iota // No token.
- tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
- tokenString // A string literal, e.g. "abc\u1234"
- tokenNumber // Number literal, e.g. 1.5e5
- tokenBool // Boolean literal: true or false.
- tokenNull // null keyword.
-)
-
-// token describes a single token: type, position in the input and value.
-type token struct {
- kind tokenKind // Type of a token.
-
- boolValue bool // Value if a boolean literal token.
- byteValue []byte // Raw value of a token.
- delimValue byte
-}
-
-// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
-type Lexer struct {
- Data []byte // Input data given to the lexer.
-
- start int // Start of the current token.
- pos int // Current unscanned position in the input stream.
- token token // Last scanned token, if token.kind != tokenUndef.
-
- firstElement bool // Whether the current element is the first in an array or an object.
- wantSep byte // A comma or a colon character which needs to occur before the next token.
-
- UseMultipleErrors bool // If we want to use multiple errors.
- fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
- multipleErrors []*LexerError // Semantic errors that occurred during lexing. Parsing continues after these errors are found.
-}
-
-// FetchToken scans the input for the next token.
-func (r *Lexer) FetchToken() {
- r.token.kind = tokenUndef
- r.start = r.pos
-
- // Check that the input still has an element at r.pos;
- // if it doesn't, the input data is corrupted.
- if len(r.Data) < r.pos {
- r.errParse("Unexpected end of data")
- return
- }
- // Determine the type of a token by skipping whitespace and reading the
- // first character.
- for _, c := range r.Data[r.pos:] {
- switch c {
- case ':', ',':
- if r.wantSep == c {
- r.pos++
- r.start++
- r.wantSep = 0
- } else {
- r.errSyntax()
- }
-
- case ' ', '\t', '\r', '\n':
- r.pos++
- r.start++
-
- case '"':
- if r.wantSep != 0 {
- r.errSyntax()
- }
-
- r.token.kind = tokenString
- r.fetchString()
- return
-
- case '{', '[':
- if r.wantSep != 0 {
- r.errSyntax()
- }
- r.firstElement = true
- r.token.kind = tokenDelim
- r.token.delimValue = r.Data[r.pos]
- r.pos++
- return
-
- case '}', ']':
- if !r.firstElement && (r.wantSep != ',') {
- r.errSyntax()
- }
- r.wantSep = 0
- r.token.kind = tokenDelim
- r.token.delimValue = r.Data[r.pos]
- r.pos++
- return
-
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
- if r.wantSep != 0 {
- r.errSyntax()
- }
- r.token.kind = tokenNumber
- r.fetchNumber()
- return
-
- case 'n':
- if r.wantSep != 0 {
- r.errSyntax()
- }
-
- r.token.kind = tokenNull
- r.fetchNull()
- return
-
- case 't':
- if r.wantSep != 0 {
- r.errSyntax()
- }
-
- r.token.kind = tokenBool
- r.token.boolValue = true
- r.fetchTrue()
- return
-
- case 'f':
- if r.wantSep != 0 {
- r.errSyntax()
- }
-
- r.token.kind = tokenBool
- r.token.boolValue = false
- r.fetchFalse()
- return
-
- default:
- r.errSyntax()
- return
- }
- }
- r.fatalError = io.EOF
- return
-}
-
-// isTokenEnd returns true if the char can follow a non-delimiter token
-func isTokenEnd(c byte) bool {
- return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
-}
-
-// fetchNull fetches and checks remaining bytes of null keyword.
-func (r *Lexer) fetchNull() {
- r.pos += 4
- if r.pos > len(r.Data) ||
- r.Data[r.pos-3] != 'u' ||
- r.Data[r.pos-2] != 'l' ||
- r.Data[r.pos-1] != 'l' ||
- (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
-
- r.pos -= 4
- r.errSyntax()
- }
-}
-
-// fetchTrue fetches and checks remaining bytes of true keyword.
-func (r *Lexer) fetchTrue() {
- r.pos += 4
- if r.pos > len(r.Data) ||
- r.Data[r.pos-3] != 'r' ||
- r.Data[r.pos-2] != 'u' ||
- r.Data[r.pos-1] != 'e' ||
- (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
-
- r.pos -= 4
- r.errSyntax()
- }
-}
-
-// fetchFalse fetches and checks remaining bytes of false keyword.
-func (r *Lexer) fetchFalse() {
- r.pos += 5
- if r.pos > len(r.Data) ||
- r.Data[r.pos-4] != 'a' ||
- r.Data[r.pos-3] != 'l' ||
- r.Data[r.pos-2] != 's' ||
- r.Data[r.pos-1] != 'e' ||
- (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
-
- r.pos -= 5
- r.errSyntax()
- }
-}
-
-// fetchNumber scans a number literal token.
-func (r *Lexer) fetchNumber() {
- hasE := false
- afterE := false
- hasDot := false
-
- r.pos++
- for i, c := range r.Data[r.pos:] {
- switch {
- case c >= '0' && c <= '9':
- afterE = false
- case c == '.' && !hasDot:
- hasDot = true
- case (c == 'e' || c == 'E') && !hasE:
- hasE = true
- hasDot = true
- afterE = true
- case (c == '+' || c == '-') && afterE:
- afterE = false
- default:
- r.pos += i
- if !isTokenEnd(c) {
- r.errSyntax()
- } else {
- r.token.byteValue = r.Data[r.start:r.pos]
- }
- return
- }
- }
-
- r.pos = len(r.Data)
- r.token.byteValue = r.Data[r.start:]
-}
-
-// findStringLen scans the string literal for the ending quote char to determine the required size.
-// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
-func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
- delta := 0
-
- for i := 0; i < len(data); i++ {
- switch data[i] {
- case '\\':
- i++
- delta++
- if i < len(data) && data[i] == 'u' {
- delta++
- }
- case '"':
- return true, (delta > 0), (i - delta)
- }
- }
-
- return false, false, len(data)
-}
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or -1 on failure.
-func getu4(s []byte) rune {
- if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
- return -1
- }
- var val rune
- for i := 2; i < len(s) && i < 6; i++ {
- var v byte
- c := s[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- v = c - '0'
- case 'a', 'b', 'c', 'd', 'e', 'f':
- v = c - 'a' + 10
- case 'A', 'B', 'C', 'D', 'E', 'F':
- v = c - 'A' + 10
- default:
- return -1
- }
-
- val <<= 4
- val |= rune(v)
- }
- return val
-}
-
-// processEscape processes a single escape sequence and returns the number of bytes processed.
-func (r *Lexer) processEscape(data []byte) (int, error) {
- if len(data) < 2 {
- return 0, fmt.Errorf("syntax error at %v", string(data))
- }
-
- c := data[1]
- switch c {
- case '"', '/', '\\':
- r.token.byteValue = append(r.token.byteValue, c)
- return 2, nil
- case 'b':
- r.token.byteValue = append(r.token.byteValue, '\b')
- return 2, nil
- case 'f':
- r.token.byteValue = append(r.token.byteValue, '\f')
- return 2, nil
- case 'n':
- r.token.byteValue = append(r.token.byteValue, '\n')
- return 2, nil
- case 'r':
- r.token.byteValue = append(r.token.byteValue, '\r')
- return 2, nil
- case 't':
- r.token.byteValue = append(r.token.byteValue, '\t')
- return 2, nil
- case 'u':
- rr := getu4(data)
- if rr < 0 {
- return 0, errors.New("syntax error")
- }
-
- read := 6
- if utf16.IsSurrogate(rr) {
- rr1 := getu4(data[read:])
- if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
- read += 6
- rr = dec
- } else {
- rr = unicode.ReplacementChar
- }
- }
- var d [4]byte
- s := utf8.EncodeRune(d[:], rr)
- r.token.byteValue = append(r.token.byteValue, d[:s]...)
- return read, nil
- }
-
- return 0, errors.New("syntax error")
-}
-
-// fetchString scans a string literal token.
-func (r *Lexer) fetchString() {
- r.pos++
- data := r.Data[r.pos:]
-
- isValid, hasEscapes, length := findStringLen(data)
- if !isValid {
- r.pos += length
- r.errParse("unterminated string literal")
- return
- }
- if !hasEscapes {
- r.token.byteValue = data[:length]
- r.pos += length + 1
- return
- }
-
- r.token.byteValue = make([]byte, 0, length)
- p := 0
- for i := 0; i < len(data); {
- switch data[i] {
- case '"':
- r.pos += i + 1
- r.token.byteValue = append(r.token.byteValue, data[p:i]...)
- i++
- return
-
- case '\\':
- r.token.byteValue = append(r.token.byteValue, data[p:i]...)
- off, err := r.processEscape(data[i:])
- if err != nil {
- r.errParse(err.Error())
- return
- }
- i += off
- p = i
-
- default:
- i++
- }
- }
- r.errParse("unterminated string literal")
-}
-
-// scanToken scans the next token if no token is currently available in the lexer.
-func (r *Lexer) scanToken() {
- if r.token.kind != tokenUndef || r.fatalError != nil {
- return
- }
-
- r.FetchToken()
-}
-
-// consume resets the current token to allow scanning the next one.
-func (r *Lexer) consume() {
- r.token.kind = tokenUndef
- r.token.delimValue = 0
-}
-
-// Ok returns true if no error (including io.EOF) was encountered during scanning.
-func (r *Lexer) Ok() bool {
- return r.fatalError == nil
-}
-
-const maxErrorContextLen = 13
-
-func (r *Lexer) errParse(what string) {
- if r.fatalError == nil {
- var str string
- if len(r.Data)-r.pos <= maxErrorContextLen {
- str = string(r.Data)
- } else {
- str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
- }
- r.fatalError = &LexerError{
- Reason: what,
- Offset: r.pos,
- Data: str,
- }
- }
-}
-
-func (r *Lexer) errSyntax() {
- r.errParse("syntax error")
-}
-
-func (r *Lexer) errInvalidToken(expected string) {
- if r.fatalError != nil {
- return
- }
- if r.UseMultipleErrors {
- r.pos = r.start
- r.consume()
- r.SkipRecursive()
- switch expected {
- case "[":
- r.token.delimValue = ']'
- r.token.kind = tokenDelim
- case "{":
- r.token.delimValue = '}'
- r.token.kind = tokenDelim
- }
- r.addNonfatalError(&LexerError{
- Reason: fmt.Sprintf("expected %s", expected),
- Offset: r.start,
- Data: string(r.Data[r.start:r.pos]),
- })
- return
- }
-
- var str string
- if len(r.token.byteValue) <= maxErrorContextLen {
- str = string(r.token.byteValue)
- } else {
- str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
- }
- r.fatalError = &LexerError{
- Reason: fmt.Sprintf("expected %s", expected),
- Offset: r.pos,
- Data: str,
- }
-}
-
-func (r *Lexer) GetPos() int {
- return r.pos
-}
-
-// Delim consumes a token and verifies that it is the given delimiter.
-func (r *Lexer) Delim(c byte) {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
-
- if !r.Ok() || r.token.delimValue != c {
- r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
- r.errInvalidToken(string([]byte{c}))
- } else {
- r.consume()
- }
-}
-
-// IsDelim returns true if there was no scanning error and next token is the given delimiter.
-func (r *Lexer) IsDelim(c byte) bool {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- return !r.Ok() || r.token.delimValue == c
-}
-
-// Null verifies that the next token is null and consumes it.
-func (r *Lexer) Null() {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenNull {
- r.errInvalidToken("null")
- }
- r.consume()
-}
-
-// IsNull returns true if the next token is a null keyword.
-func (r *Lexer) IsNull() bool {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- return r.Ok() && r.token.kind == tokenNull
-}
-
-// Skip skips a single token.
-func (r *Lexer) Skip() {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- r.consume()
-}
-
-// SkipRecursive skips the next array or object completely, or just skips a single token if it is
-// not an array/object.
-//
-// Note: no syntax validation is performed on the skipped data.
-func (r *Lexer) SkipRecursive() {
- r.scanToken()
- var start, end byte
-
- if r.token.delimValue == '{' {
- start, end = '{', '}'
- } else if r.token.delimValue == '[' {
- start, end = '[', ']'
- } else {
- r.consume()
- return
- }
-
- r.consume()
-
- level := 1
- inQuotes := false
- wasEscape := false
-
- for i, c := range r.Data[r.pos:] {
- switch {
- case c == start && !inQuotes:
- level++
- case c == end && !inQuotes:
- level--
- if level == 0 {
- r.pos += i + 1
- return
- }
- case c == '\\' && inQuotes:
- wasEscape = !wasEscape
- continue
- case c == '"' && inQuotes:
- inQuotes = wasEscape
- case c == '"':
- inQuotes = true
- }
- wasEscape = false
- }
- r.pos = len(r.Data)
- r.fatalError = &LexerError{
- Reason: "EOF reached while skipping array/object or token",
- Offset: r.pos,
- Data: string(r.Data[r.pos:]),
- }
-}
-
-// Raw fetches the next item recursively as a data slice.
-func (r *Lexer) Raw() []byte {
- r.SkipRecursive()
- if !r.Ok() {
- return nil
- }
- return r.Data[r.start:r.pos]
-}
-
-// IsStart returns whether the lexer is positioned at the start
-// of an input string.
-func (r *Lexer) IsStart() bool {
- return r.pos == 0
-}
-
-// Consumed reads all remaining bytes from the input, publishing an error if
-// there is anything but whitespace remaining.
-func (r *Lexer) Consumed() {
- if r.pos > len(r.Data) || !r.Ok() {
- return
- }
-
- for _, c := range r.Data[r.pos:] {
- if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
- r.AddError(&LexerError{
- Reason: "invalid character '" + string(c) + "' after top-level value",
- Offset: r.pos,
- Data: string(r.Data[r.pos:]),
- })
- return
- }
-
- r.pos++
- r.start++
- }
-}
-
-func (r *Lexer) unsafeString() (string, []byte) {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenString {
- r.errInvalidToken("string")
- return "", nil
- }
- bytes := r.token.byteValue
- ret := bytesToStr(r.token.byteValue)
- r.consume()
- return ret, bytes
-}
-
-// UnsafeString returns the string value if the token is a string literal.
-//
-// Warning: returned string may point to the input buffer, so the string should not outlive
-// the input buffer. Intended pattern of usage is as an argument to a switch statement.
-func (r *Lexer) UnsafeString() string {
- ret, _ := r.unsafeString()
- return ret
-}
-
-// UnsafeBytes returns the byte slice if the token is a string literal.
-func (r *Lexer) UnsafeBytes() []byte {
- _, ret := r.unsafeString()
- return ret
-}
-
-// String reads a string literal.
-func (r *Lexer) String() string {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenString {
- r.errInvalidToken("string")
- return ""
- }
- ret := string(r.token.byteValue)
- r.consume()
- return ret
-}
-
-// Bytes reads a string literal and base64 decodes it into a byte slice.
-func (r *Lexer) Bytes() []byte {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenString {
- r.errInvalidToken("string")
- return nil
- }
- ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
- n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
- if err != nil {
- r.fatalError = &LexerError{
- Reason: err.Error(),
- }
- return nil
- }
-
- r.consume()
- return ret[:n]
-}
-
-// Bool reads a true or false boolean keyword.
-func (r *Lexer) Bool() bool {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenBool {
- r.errInvalidToken("bool")
- return false
- }
- ret := r.token.boolValue
- r.consume()
- return ret
-}
-
-func (r *Lexer) number() string {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() || r.token.kind != tokenNumber {
- r.errInvalidToken("number")
- return ""
- }
- ret := bytesToStr(r.token.byteValue)
- r.consume()
- return ret
-}
-
-func (r *Lexer) Uint8() uint8 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 8)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return uint8(n)
-}
-
-func (r *Lexer) Uint16() uint16 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 16)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return uint16(n)
-}
-
-func (r *Lexer) Uint32() uint32 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return uint32(n)
-}
-
-func (r *Lexer) Uint64() uint64 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return n
-}
-
-func (r *Lexer) Uint() uint {
- return uint(r.Uint64())
-}
-
-func (r *Lexer) Int8() int8 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 8)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return int8(n)
-}
-
-func (r *Lexer) Int16() int16 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 16)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return int16(n)
-}
-
-func (r *Lexer) Int32() int32 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return int32(n)
-}
-
-func (r *Lexer) Int64() int64 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return n
-}
-
-func (r *Lexer) Int() int {
- return int(r.Int64())
-}
-
-func (r *Lexer) Uint8Str() uint8 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 8)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return uint8(n)
-}
-
-func (r *Lexer) Uint16Str() uint16 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 16)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return uint16(n)
-}
-
-func (r *Lexer) Uint32Str() uint32 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return uint32(n)
-}
-
-func (r *Lexer) Uint64Str() uint64 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return n
-}
-
-func (r *Lexer) UintStr() uint {
- return uint(r.Uint64Str())
-}
-
-func (r *Lexer) UintptrStr() uintptr {
- return uintptr(r.Uint64Str())
-}
-
-func (r *Lexer) Int8Str() int8 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 8)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return int8(n)
-}
-
-func (r *Lexer) Int16Str() int16 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 16)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return int16(n)
-}
-
-func (r *Lexer) Int32Str() int32 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return int32(n)
-}
-
-func (r *Lexer) Int64Str() int64 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return n
-}
-
-func (r *Lexer) IntStr() int {
- return int(r.Int64Str())
-}
-
-func (r *Lexer) Float32() float32 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseFloat(s, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return float32(n)
-}
-
-func (r *Lexer) Float32Str() float32 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
- n, err := strconv.ParseFloat(s, 32)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return float32(n)
-}
-
-func (r *Lexer) Float64() float64 {
- s := r.number()
- if !r.Ok() {
- return 0
- }
-
- n, err := strconv.ParseFloat(s, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: s,
- })
- }
- return n
-}
-
-func (r *Lexer) Float64Str() float64 {
- s, b := r.unsafeString()
- if !r.Ok() {
- return 0
- }
- n, err := strconv.ParseFloat(s, 64)
- if err != nil {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Reason: err.Error(),
- Data: string(b),
- })
- }
- return n
-}
-
-func (r *Lexer) Error() error {
- return r.fatalError
-}
-
-func (r *Lexer) AddError(e error) {
- if r.fatalError == nil {
- r.fatalError = e
- }
-}
-
-func (r *Lexer) AddNonFatalError(e error) {
- r.addNonfatalError(&LexerError{
- Offset: r.start,
- Data: string(r.Data[r.start:r.pos]),
- Reason: e.Error(),
- })
-}
-
-func (r *Lexer) addNonfatalError(err *LexerError) {
- if r.UseMultipleErrors {
- // We don't want to add errors with the same offset.
- if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
- return
- }
- r.multipleErrors = append(r.multipleErrors, err)
- return
- }
- r.fatalError = err
-}
-
-func (r *Lexer) GetNonFatalErrors() []*LexerError {
- return r.multipleErrors
-}
-
-// JsonNumber fetches a json.Number from the 'encoding/json' package.
-// Int, float, and string tokens that contain a number are all valid values.
-func (r *Lexer) JsonNumber() json.Number {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
- if !r.Ok() {
- r.errInvalidToken("json.Number")
- return json.Number("")
- }
-
- switch r.token.kind {
- case tokenString:
- return json.Number(r.String())
- case tokenNumber:
- return json.Number(r.Raw())
- case tokenNull:
- r.Null()
- return json.Number("")
- default:
- r.errSyntax()
- return json.Number("")
- }
-}
-
-// Interface fetches an interface{} analogous to the 'encoding/json' package.
-func (r *Lexer) Interface() interface{} {
- if r.token.kind == tokenUndef && r.Ok() {
- r.FetchToken()
- }
-
- if !r.Ok() {
- return nil
- }
- switch r.token.kind {
- case tokenString:
- return r.String()
- case tokenNumber:
- return r.Float64()
- case tokenBool:
- return r.Bool()
- case tokenNull:
- r.Null()
- return nil
- }
-
- if r.token.delimValue == '{' {
- r.consume()
-
- ret := map[string]interface{}{}
- for !r.IsDelim('}') {
- key := r.String()
- r.WantColon()
- ret[key] = r.Interface()
- r.WantComma()
- }
- r.Delim('}')
-
- if r.Ok() {
- return ret
- } else {
- return nil
- }
- } else if r.token.delimValue == '[' {
- r.consume()
-
- var ret []interface{}
- for !r.IsDelim(']') {
- ret = append(ret, r.Interface())
- r.WantComma()
- }
- r.Delim(']')
-
- if r.Ok() {
- return ret
- } else {
- return nil
- }
- }
- r.errSyntax()
- return nil
-}
-
-// WantComma requires a comma to be present before fetching next token.
-func (r *Lexer) WantComma() {
- r.wantSep = ','
- r.firstElement = false
-}
-
-// WantColon requires a colon to be present before fetching next token.
-func (r *Lexer) WantColon() {
- r.wantSep = ':'
- r.firstElement = false
-}
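-
-// exampleThing and its UnmarshalEasyJSON sketch, for illustration only, the
-// rough shape of the code the easyjson generator emits against this lexer
-// API (real generated code is more elaborate).
-type exampleThing struct {
-	Name string
-}
-
-func (v *exampleThing) UnmarshalEasyJSON(l *Lexer) {
-	l.Delim('{')
-	for !l.IsDelim('}') {
-		key := l.UnsafeString()
-		l.WantColon()
-		switch key {
-		case "name":
-			v.Name = l.String()
-		default:
-			l.SkipRecursive()
-		}
-		l.WantComma()
-	}
-	l.Delim('}')
-}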
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
deleted file mode 100644
index b9ed7ccaa..000000000
--- a/vendor/github.com/mailru/easyjson/jwriter/writer.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Package jwriter contains a JSON writer.
-package jwriter
-
-import (
- "io"
- "strconv"
- "unicode/utf8"
-
- "github.com/mailru/easyjson/buffer"
-)
-
-// Flags describe various encoding options. The behavior may actually be implemented in the encoder,
-// but the Flags field in Writer is used to set and pass them around.
-type Flags int
-
-const (
- NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
- NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
-)
-
-// Writer is a JSON writer.
-type Writer struct {
- Flags Flags
-
- Error error
- Buffer buffer.Buffer
- NoEscapeHTML bool
-}
-
-// Size returns the size of the data that was written out.
-func (w *Writer) Size() int {
- return w.Buffer.Size()
-}
-
-// DumpTo outputs the data to the given io.Writer, resetting the buffer.
-func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
- return w.Buffer.DumpTo(out)
-}
-
-// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
-// as an argument that it will try to reuse.
-func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
- if w.Error != nil {
- return nil, w.Error
- }
-
- return w.Buffer.BuildBytes(reuse...), nil
-}
-
-// ReadCloser returns an io.ReadCloser that can be used to read the data.
-// ReadCloser also resets the buffer.
-func (w *Writer) ReadCloser() (io.ReadCloser, error) {
- if w.Error != nil {
- return nil, w.Error
- }
-
- return w.Buffer.ReadCloser(), nil
-}
-
-// RawByte appends a single raw byte to the buffer.
-func (w *Writer) RawByte(c byte) {
- w.Buffer.AppendByte(c)
-}
-
-// RawString appends a raw string to the buffer.
-func (w *Writer) RawString(s string) {
- w.Buffer.AppendString(s)
-}
-
-// Raw appends raw binary data to the buffer, or sets the error if one is given. Useful for
-// calling with results of MarshalJSON-like functions.
-func (w *Writer) Raw(data []byte, err error) {
- switch {
- case w.Error != nil:
- return
- case err != nil:
- w.Error = err
- case len(data) > 0:
- w.Buffer.AppendBytes(data)
- default:
- w.RawString("null")
- }
-}
-
-// RawText encloses raw binary data in quotes and appends it to the buffer.
-// Useful for calling with results of MarshalText-like functions.
-func (w *Writer) RawText(data []byte, err error) {
- switch {
- case w.Error != nil:
- return
- case err != nil:
- w.Error = err
- case len(data) > 0:
- w.String(string(data))
- default:
- w.RawString("null")
- }
-}
-
-// Base64Bytes appends data to the buffer after base64-encoding it.
-func (w *Writer) Base64Bytes(data []byte) {
- if data == nil {
- w.Buffer.AppendString("null")
- return
- }
- w.Buffer.AppendByte('"')
- w.base64(data)
- w.Buffer.AppendByte('"')
-}
-
-func (w *Writer) Uint8(n uint8) {
- w.Buffer.EnsureSpace(3)
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
-}
-
-func (w *Writer) Uint16(n uint16) {
- w.Buffer.EnsureSpace(5)
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
-}
-
-func (w *Writer) Uint32(n uint32) {
- w.Buffer.EnsureSpace(10)
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
-}
-
-func (w *Writer) Uint(n uint) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
-}
-
-func (w *Writer) Uint64(n uint64) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
-}
-
-func (w *Writer) Int8(n int8) {
- w.Buffer.EnsureSpace(4)
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
-}
-
-func (w *Writer) Int16(n int16) {
- w.Buffer.EnsureSpace(6)
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
-}
-
-func (w *Writer) Int32(n int32) {
- w.Buffer.EnsureSpace(11)
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
-}
-
-func (w *Writer) Int(n int) {
- w.Buffer.EnsureSpace(21)
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
-}
-
-func (w *Writer) Int64(n int64) {
- w.Buffer.EnsureSpace(21)
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
-}
-
-func (w *Writer) Uint8Str(n uint8) {
- w.Buffer.EnsureSpace(3)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Uint16Str(n uint16) {
- w.Buffer.EnsureSpace(5)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Uint32Str(n uint32) {
- w.Buffer.EnsureSpace(10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) UintStr(n uint) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Uint64Str(n uint64) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) UintptrStr(n uintptr) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Int8Str(n int8) {
- w.Buffer.EnsureSpace(4)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Int16Str(n int16) {
- w.Buffer.EnsureSpace(6)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Int32Str(n int32) {
- w.Buffer.EnsureSpace(11)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) IntStr(n int) {
- w.Buffer.EnsureSpace(21)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Int64Str(n int64) {
- w.Buffer.EnsureSpace(21)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Float32(n float32) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
-}
-
-func (w *Writer) Float32Str(n float32) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Float64(n float64) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
-}
-
-func (w *Writer) Float64Str(n float64) {
- w.Buffer.EnsureSpace(20)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
- w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
- w.Buffer.Buf = append(w.Buffer.Buf, '"')
-}
-
-func (w *Writer) Bool(v bool) {
- w.Buffer.EnsureSpace(5)
- if v {
- w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
- } else {
- w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
- }
-}
-
-const chars = "0123456789abcdef"
-
-func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
- // Note: might make sense to use a table if there are more chars to escape. With 4 chars
- // it benchmarks the same.
- if escapeHTML {
- return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
- } else {
- return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
- }
-}
-
-func (w *Writer) String(s string) {
- w.Buffer.AppendByte('"')
-
- // Portions of the string that contain no escapes are appended as
- // byte slices.
-
- p := 0 // last non-escape symbol
-
- for i := 0; i < len(s); {
- c := s[i]
-
- if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
- // single-width character, no escaping is required
- i++
- continue
- } else if c < utf8.RuneSelf {
- // single-width character, needs escaping
- w.Buffer.AppendString(s[p:i])
- switch c {
- case '\t':
- w.Buffer.AppendString(`\t`)
- case '\r':
- w.Buffer.AppendString(`\r`)
- case '\n':
- w.Buffer.AppendString(`\n`)
- case '\\':
- w.Buffer.AppendString(`\\`)
- case '"':
- w.Buffer.AppendString(`\"`)
- default:
- w.Buffer.AppendString(`\u00`)
- w.Buffer.AppendByte(chars[c>>4])
- w.Buffer.AppendByte(chars[c&0xf])
- }
-
- i++
- p = i
- continue
- }
-
- // broken UTF-8 sequence
- runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
- if runeValue == utf8.RuneError && runeWidth == 1 {
- w.Buffer.AppendString(s[p:i])
- w.Buffer.AppendString(`\ufffd`)
- i++
- p = i
- continue
- }
-
- // JSONP-unsafe chars: line separator (U+2028) and paragraph separator (U+2029)
- if runeValue == '\u2028' || runeValue == '\u2029' {
- w.Buffer.AppendString(s[p:i])
- w.Buffer.AppendString(`\u202`)
- w.Buffer.AppendByte(chars[runeValue&0xf])
- i += runeWidth
- p = i
- continue
- }
- i += runeWidth
- }
- w.Buffer.AppendString(s[p:])
- w.Buffer.AppendByte('"')
-}
-
-const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-const padChar = '='
-
-func (w *Writer) base64(in []byte) {
-
- if len(in) == 0 {
- return
- }
-
- w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
-
- si := 0
- n := (len(in) / 3) * 3
-
- for si < n {
- // Convert 3x 8-bit source bytes into 4 output bytes.
- val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
-
- w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
-
- si += 3
- }
-
- remain := len(in) - si
- if remain == 0 {
- return
- }
-
- // Add the remaining small block
- val := uint(in[si+0]) << 16
- if remain == 2 {
- val |= uint(in[si+1]) << 8
- }
-
- w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
-
- switch remain {
- case 2:
- w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
- case 1:
- w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
- }
-}
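-
-// writerSketch is an illustrative documentation sketch (not part of the
-// original package API) of hand-driving Writer the way generated
-// MarshalEasyJSON code does.
-func writerSketch(out io.Writer) error {
-	var w Writer
-	w.RawByte('{')
-	w.String("name") // writes a quoted, escaped JSON string
-	w.RawByte(':')
-	w.String("gopher")
-	w.RawByte('}')
-	if w.Error != nil {
-		return w.Error
-	}
-	_, err := w.DumpTo(out)
-	return err
-}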
diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go
deleted file mode 100644
index 81bd002e1..000000000
--- a/vendor/github.com/mailru/easyjson/raw.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package easyjson
-
-import (
- "github.com/mailru/easyjson/jlexer"
- "github.com/mailru/easyjson/jwriter"
-)
-
-// RawMessage is a raw piece of JSON (number, string, bool, object, array or
-// null) that is extracted without parsing and output as is during marshaling.
-type RawMessage []byte
-
-// MarshalEasyJSON does JSON marshaling using easyjson interface.
-func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) {
- if len(*v) == 0 {
- w.RawString("null")
- } else {
- w.Raw(*v, nil)
- }
-}
-
-// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
-func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {
- *v = RawMessage(l.Raw())
-}
-
-// UnmarshalJSON implements encoding/json.Unmarshaler interface.
-func (v *RawMessage) UnmarshalJSON(data []byte) error {
- *v = data
- return nil
-}
-
-var nullBytes = []byte("null")
-
-// MarshalJSON implements encoding/json.Marshaler interface.
-func (v RawMessage) MarshalJSON() ([]byte, error) {
- if len(v) == 0 {
- return nullBytes, nil
- }
- return v, nil
-}
-
-// IsDefined is required for integration with omitempty easyjson logic.
-func (v *RawMessage) IsDefined() bool {
- return len(*v) > 0
-}
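-
-// envelope is a usage sketch (the type is hypothetical): declaring a field as
-// RawMessage keeps the sub-document verbatim during unmarshaling so it can be
-// parsed later, e.g. based on Kind.
-//
-//easyjson:json
-type envelope struct {
-	Kind    string     `json:"kind"`
-	Payload RawMessage `json:"payload"`
-}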