Diffstat (limited to 'vendor/github.com/docker/distribution/registry/client')
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go            27
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go  237
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/blob_writer.go                    162
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/errors.go                         139
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/repository.go                     853
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/http_reader.go          251
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/transport.go            147
7 files changed, 1816 insertions, 0 deletions
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 000000000..2c3ebe165
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 000000000..c9bdfc355
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+ // Scheme is the auth-scheme according to RFC 2617
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 2617
+ Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+ // GetChallenges returns the challenges for the given
+ // endpoint URL.
+ GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+ // AddResponse adds the response to the challenge
+ // manager. The challenges will be parsed out of
+ // the WWW-Authenticate headers and added to the
+ // URL which produced the response. If the
+ // response was authorized, any challenges for the
+ // endpoint will be cleared.
+ AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+ return &simpleManager{
+ Challenges: make(map[string][]Challenge),
+ }
+}
+
+type simpleManager struct {
+ sync.RWMutex
+ Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+ endpoint.Host = strings.ToLower(endpoint.Host)
+ endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+ normalizeURL(&endpoint)
+
+ m.RLock()
+ defer m.RUnlock()
+ challenges := m.Challenges[endpoint.String()]
+ return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+ challenges := ResponseChallenges(resp)
+ if resp.Request == nil {
+ return fmt.Errorf("missing request reference")
+ }
+ urlCopy := url.URL{
+ Path: resp.Request.URL.Path,
+ Host: resp.Request.URL.Host,
+ Scheme: resp.Request.URL.Scheme,
+ }
+ normalizeURL(&urlCopy)
+
+ m.Lock()
+ defer m.Unlock()
+ m.Challenges[urlCopy.String()] = challenges
+ return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+ if resp.StatusCode == http.StatusUnauthorized {
+ // Parse the WWW-Authenticate Header and store the challenges
+ // on this endpoint object.
+ return parseAuthHeader(resp.Header)
+ }
+
+ return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+ challenges := []Challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
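
For orientation, a minimal sketch of how this challenge manager is normally driven: probe a registry's /v2/ endpoint, record the 401 response, then look the stored challenges back up by URL. The registry address below is an assumption used only for illustration.

package main

import (
    "fmt"
    "net/http"
    "net/url"

    "github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
    manager := challenge.NewSimpleManager()

    // The version check endpoint typically answers 401 with a
    // WWW-Authenticate header describing the token service.
    resp, err := http.Get("https://registry.example.com/v2/")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Record any challenges returned for this endpoint.
    if err := manager.AddResponse(resp); err != nil {
        panic(err)
    }

    endpoint, err := url.Parse("https://registry.example.com/v2/")
    if err != nil {
        panic(err)
    }
    challenges, _ := manager.GetChallenges(*endpoint)
    for _, c := range challenges {
        fmt.Println(c.Scheme, c.Parameters)
    }
}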
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
new file mode 100644
index 000000000..e3ffcb00f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+)
+
+type httpBlobUpload struct {
+ statter distribution.BlobStatter
+ client *http.Client
+
+ uuid string
+ startedAt time.Time
+
+ location string // always the last value of the location header.
+ offset int64
+ closed bool
+}
+
+func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
+ panic("Not implemented")
+}
+
+func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUploadUnknown
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
+ if err != nil {
+ return 0, err
+ }
+ defer req.Body.Close()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int64
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
+ if err != nil {
+ return 0, err
+ }
+ req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
+ req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
+ req.Header.Set("Content-Type", "application/octet-stream")
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Size() int64 {
+ return hbu.offset
+}
+
+func (hbu *httpBlobUpload) ID() string {
+ return hbu.uuid
+}
+
+func (hbu *httpBlobUpload) StartedAt() time.Time {
+ return hbu.startedAt
+}
+
+func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+ // TODO(dmcgowan): Check if already finished, if so just fetch
+ req, err := http.NewRequest("PUT", hbu.location, nil)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ values := req.URL.Query()
+ values.Set("digest", desc.Digest.String())
+ req.URL.RawQuery = values.Encode()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if !SuccessStatus(resp.StatusCode) {
+ return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
+ }
+
+ return hbu.statter.Stat(ctx, desc.Digest)
+}
+
+func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
+ req, err := http.NewRequest("DELETE", hbu.location, nil)
+ if err != nil {
+ return err
+ }
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return hbu.handleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) Close() error {
+ hbu.closed = true
+ return nil
+}
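
The upload type above is not used directly; callers obtain it as a distribution.BlobWriter from a repository's blob store. A rough sketch of that calling pattern follows, assuming the repository has already been constructed elsewhere.

package example

import (
    "io"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/opencontainers/go-digest"
)

// uploadBlob streams content into a repository's blob store through the
// BlobWriter returned by Create, then commits it under its computed digest.
func uploadBlob(ctx context.Context, repo distribution.Repository, content io.Reader) (distribution.Descriptor, error) {
    writer, err := repo.Blobs(ctx).Create(ctx)
    if err != nil {
        return distribution.Descriptor{}, err
    }
    defer writer.Close()

    // Hash the content while it is written so the digest is known at commit time.
    dgstr := digest.Canonical.Digester()
    if _, err := io.Copy(writer, io.TeeReader(content, dgstr.Hash())); err != nil {
        return distribution.Descriptor{}, err
    }

    // Commit issues the final PUT carrying the computed digest.
    return writer.Commit(ctx, distribution.Descriptor{Digest: dgstr.Digest()})
}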
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 000000000..52d49d5d2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+ Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+ return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the response body could not be parsed as expected.
+type UnexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &UnexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &UnexpectedHTTPResponseError{
+ ParseErr: ErrNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns the error parsed from the HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of that
+// range.
+func HandleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ // Check for OAuth errors within the `WWW-Authenticate` header first
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ for _, c := range challenge.ResponseChallenges(resp) {
+ if c.Scheme == "bearer" {
+ var err errcode.Error
+ // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+ switch c.Parameters["error"] {
+ case "invalid_token":
+ err.Code = errcode.ErrorCodeUnauthorized
+ case "insufficient_scope":
+ err.Code = errcode.ErrorCodeDenied
+ default:
+ continue
+ }
+ if description := c.Parameters["error_description"]; description != "" {
+ err.Message = description
+ } else {
+ err.Message = err.Code.Message()
+ }
+
+ return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+ }
+ }
+ err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+ if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+ }
+ return err
+ }
+ return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func SuccessStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
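
A brief sketch of how SuccessStatus and HandleErrorResponse are combined by callers of this package; the manifest URL is an assumption used only for illustration.

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/distribution/registry/client"
)

func main() {
    resp, err := http.Get("https://registry.example.com/v2/library/busybox/manifests/latest")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    if !client.SuccessStatus(resp.StatusCode) {
        // 4xx bodies are translated into errcode.Errors values; other
        // status codes become an *UnexpectedHTTPStatusError.
        fmt.Println(client.HandleErrorResponse(resp))
        return
    }
    fmt.Println("manifest fetched")
}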
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 000000000..b82a968e2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,853 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client/transport"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/docker/distribution/registry/storage/cache/memory"
+ "github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+
+ if len(via) > 0 {
+ for headerName, headerVals := range via[0].Header {
+ if headerName != "Accept" && headerName != "Range" {
+ continue
+ }
+ for _, val := range headerVals {
+ // Don't add to redirected request if redirected
+ // request already has a header with the same
+ // name and value.
+ hasValue := false
+ for _, existingVal := range req.Header[headerName] {
+ if existingVal == val {
+ hasValue = true
+ break
+ }
+ }
+ if !hasValue {
+ req.Header.Add(headerName, val)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ Timeout: 1 * time.Minute,
+ CheckRedirect: checkHTTPRedirect,
+ }
+
+ return &registry{
+ client: client,
+ ub: ub,
+ context: ctx,
+ }, nil
+}
+
+type registry struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
+// are no more entries.
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+ var numFilled int
+ var returnErr error
+
+ values := buildCatalogValues(len(entries), last)
+ u, err := r.ub.BuildCatalogURL(values)
+ if err != nil {
+ return 0, err
+ }
+
+ resp, err := r.client.Get(u)
+ if err != nil {
+ return 0, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ var ctlg struct {
+ Repositories []string `json:"repositories"`
+ }
+ decoder := json.NewDecoder(resp.Body)
+
+ if err := decoder.Decode(&ctlg); err != nil {
+ return 0, err
+ }
+
+ for cnt := range ctlg.Repositories {
+ entries[cnt] = ctlg.Repositories[cnt]
+ }
+ numFilled = len(ctlg.Repositories)
+
+ link := resp.Header.Get("Link")
+ if link == "" {
+ returnErr = io.EOF
+ }
+ } else {
+ return 0, HandleErrorResponse(resp)
+ }
+
+ return numFilled, returnErr
+}
+
+// NewRepository creates a new Repository for the given repository name and base URL.
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ CheckRedirect: checkHTTPRedirect,
+ // TODO(dmcgowan): create cookie jar
+ }
+
+ return &repository{
+ client: client,
+ ub: ub,
+ name: name,
+ context: ctx,
+ }, nil
+}
+
+type repository struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+func (r *repository) Named() reference.Named {
+ return r.name
+}
+
+func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
+ statter := &blobStatter{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ }
+ return &blobs{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
+ }
+}
+
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+ // todo(richardscothern): options should be sent over the wire
+ return &manifests{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ etags: make(map[string]string),
+ }, nil
+}
+
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+ return &tags{
+ client: r.client,
+ ub: r.ub,
+ context: r.context,
+ name: r.Named(),
+ }
+}
+
+// tags implements remote tagging operations.
+type tags struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+// All returns all tags
+func (t *tags) All(ctx context.Context) ([]string, error) {
+ var tags []string
+
+ u, err := t.ub.BuildTagsURL(t.name)
+ if err != nil {
+ return tags, err
+ }
+
+ for {
+ resp, err := t.client.Get(u)
+ if err != nil {
+ return tags, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return tags, err
+ }
+
+ tagsResponse := struct {
+ Tags []string `json:"tags"`
+ }{}
+ if err := json.Unmarshal(b, &tagsResponse); err != nil {
+ return tags, err
+ }
+ tags = append(tags, tagsResponse.Tags...)
+ if link := resp.Header.Get("Link"); link != "" {
+ u = strings.Trim(strings.Split(link, ";")[0], "<>")
+ } else {
+ return tags, nil
+ }
+ } else {
+ return tags, HandleErrorResponse(resp)
+ }
+ }
+}
+
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+ desc := distribution.Descriptor{}
+ headers := response.Header
+
+ ctHeader := headers.Get("Content-Type")
+ if ctHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+ }
+ desc.MediaType = ctHeader
+
+ digestHeader := headers.Get("Docker-Content-Digest")
+ if digestHeader == "" {
+ bytes, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ return desc, nil
+ }
+
+ dgst, err := digest.Parse(digestHeader)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Digest = dgst
+
+ lengthHeader := headers.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+ }
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Size = length
+
+ return desc, nil
+
+}
+
+// Get issues a HEAD request for a Manifest against its named endpoint in order
+// to construct a descriptor for the tag. If the registry doesn't support HEADing
+// a manifest, fall back to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+ ref, err := reference.WithTag(t.name, tag)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := t.ub.BuildManifestURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ newRequest := func(method string) (*http.Response, error) {
+ req, err := http.NewRequest(method, u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+ resp, err := t.client.Do(req)
+ return resp, err
+ }
+
+ resp, err := newRequest("HEAD")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ switch {
+ case resp.StatusCode >= 200 && resp.StatusCode < 400:
+ return descriptorFromResponse(resp)
+ default:
+ // if the response is an error - there will be no body to decode.
+ // Issue a GET request:
+ // - for data from a server that does not handle HEAD
+ // - to get error details in case of a failure
+ resp, err = newRequest("GET")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+ return descriptorFromResponse(resp)
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+ }
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+ panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+ panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+ panic("not implemented")
+}
+
+type manifests struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+ etags map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return false, err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := ms.client.Head(u)
+ if err != nil {
+ return false, err
+ }
+
+ if SuccessStatus(resp.StatusCode) {
+ return true, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return false, nil
+ }
+ return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+ return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+ if ms, ok := ms.(*manifests); ok {
+ ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+ return nil
+ }
+ return fmt.Errorf("etag options is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
+ return contentDigestOption{dgst}
+}
+
+type contentDigestOption struct{ digest *digest.Digest }
+
+func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
+ return nil
+}
+
+func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+ var (
+ digestOrTag string
+ ref reference.Named
+ err error
+ contentDgst *digest.Digest
+ )
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ digestOrTag = opt.Tag
+ ref, err = reference.WithTag(ms.name, opt.Tag)
+ if err != nil {
+ return nil, err
+ }
+ } else if opt, ok := option.(contentDigestOption); ok {
+ contentDgst = opt.digest
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if digestOrTag == "" {
+ digestOrTag = dgst.String()
+ ref, err = reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+
+ if _, ok := ms.etags[digestOrTag]; ok {
+ req.Header.Set("If-None-Match", ms.etags[digestOrTag])
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return nil, distribution.ErrManifestNotModified
+ } else if SuccessStatus(resp.StatusCode) {
+ if contentDgst != nil {
+ dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil {
+ *contentDgst = dgst
+ }
+ }
+ mt := resp.Header.Get("Content-Type")
+ body, err := ioutil.ReadAll(resp.Body)
+
+ if err != nil {
+ return nil, err
+ }
+ m, _, err := distribution.UnmarshalManifest(mt, body)
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+ return nil, HandleErrorResponse(resp)
+}
+
+// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
+// tag name in order to build the correct upload URL.
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+ ref := ms.name
+ var tagged bool
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ var err error
+ ref, err = reference.WithTag(ref, opt.Tag)
+ if err != nil {
+ return "", err
+ }
+ tagged = true
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return "", err
+ }
+ }
+ }
+ mediaType, p, err := m.Payload()
+ if err != nil {
+ return "", err
+ }
+
+ if !tagged {
+ // generate a canonical digest and Put by digest
+ _, d, err := distribution.UnmarshalManifest(mediaType, p)
+ if err != nil {
+ return "", err
+ }
+ ref, err = reference.WithDigest(ref, d.Digest)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ manifestURL, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return "", err
+ }
+
+ putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
+ if err != nil {
+ return "", err
+ }
+
+ putRequest.Header.Set("Content-Type", mediaType)
+
+ resp, err := ms.client.Do(putRequest)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ dgstHeader := resp.Header.Get("Docker-Content-Digest")
+ dgst, err := digest.Parse(dgstHeader)
+ if err != nil {
+ return "", err
+ }
+
+ return dgst, nil
+ }
+
+ return "", HandleErrorResponse(resp)
+}
+
+func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+// todo(richardscothern): Restore interface and implementation with merge of #1050
+/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+ panic("not supported")
+}*/
+
+type blobs struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+
+ statter distribution.BlobDescriptorService
+ distribution.BlobDeleter
+}
+
+func sanitizeLocation(location, base string) (string, error) {
+ baseURL, err := url.Parse(base)
+ if err != nil {
+ return "", err
+ }
+
+ locationURL, err := url.Parse(location)
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.ResolveReference(locationURL).String(), nil
+}
+
+func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ return bs.statter.Stat(ctx, dgst)
+
+}
+
+func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+ reader, err := bs.Open(ctx, dgst)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+
+ return ioutil.ReadAll(reader)
+}
+
+func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return transport.NewHTTPReadSeeker(bs.client, blobURL,
+ func(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUnknown
+ }
+ return HandleErrorResponse(resp)
+ }), nil
+}
+
+func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+ panic("not implemented")
+}
+
+func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+ writer, err := bs.Create(ctx)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ dgstr := digest.Canonical.Digester()
+ n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ if n < int64(len(p)) {
+ return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
+ }
+
+ desc := distribution.Descriptor{
+ MediaType: mediaType,
+ Size: int64(len(p)),
+ Digest: dgstr.Digest(),
+ }
+
+ return writer.Commit(ctx, desc)
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+ return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+ return optionFunc(func(v interface{}) error {
+ opts, ok := v.(*distribution.CreateOptions)
+ if !ok {
+ return fmt.Errorf("unexpected options type: %T", v)
+ }
+
+ opts.Mount.ShouldMount = true
+ opts.Mount.From = ref
+
+ return nil
+ })
+}
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+ var opts distribution.CreateOptions
+
+ for _, option := range options {
+ err := option.Apply(&opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var values []url.Values
+
+ if opts.Mount.ShouldMount {
+ values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+ }
+
+ u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := bs.client.Post(u, "", nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusCreated:
+ desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+ case http.StatusAccepted:
+ // TODO(dmcgowan): Check for invalid UUID
+ uuid := resp.Header.Get("Docker-Upload-UUID")
+ location, err := sanitizeLocation(resp.Header.Get("Location"), u)
+ if err != nil {
+ return nil, err
+ }
+
+ return &httpBlobUpload{
+ statter: bs.statter,
+ client: bs.client,
+ uuid: uuid,
+ startedAt: time.Now(),
+ location: location,
+ }, nil
+ default:
+ return nil, HandleErrorResponse(resp)
+ }
+}
+
+func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+ panic("not implemented")
+}
+
+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+ return bs.statter.Clear(ctx, dgst)
+}
+
+type blobStatter struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+}
+
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ resp, err := bs.client.Head(u)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ lengthHeader := resp.Header.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+ }
+
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
+ }
+
+ return distribution.Descriptor{
+ MediaType: resp.Header.Get("Content-Type"),
+ Size: length,
+ Digest: dgst,
+ }, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+}
+
+func buildCatalogValues(maxEntries int, last string) url.Values {
+ values := url.Values{}
+
+ if maxEntries > 0 {
+ values.Add("n", strconv.Itoa(maxEntries))
+ }
+
+ if last != "" {
+ values.Add("last", last)
+ }
+
+ return values
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("DELETE", blobURL, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := bs.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ return nil
+}
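
A hedged sketch of the intended call pattern for this client: build a Repository for a named image, list its tags, and fetch one manifest by tag. The registry URL, image name, and tag are placeholders, not values from this diff, and the nil transport falls back to http.DefaultTransport.

package main

import (
    "fmt"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/client"
)

func main() {
    ctx := context.Background()

    named, err := reference.WithName("library/busybox")
    if err != nil {
        panic(err)
    }

    repo, err := client.NewRepository(ctx, named, "https://registry.example.com", nil)
    if err != nil {
        panic(err)
    }

    tags, err := repo.Tags(ctx).All(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println("tags:", tags)

    manifests, err := repo.Manifests(ctx)
    if err != nil {
        panic(err)
    }
    // Fetch by tag via the WithTag option; the digest argument is unused
    // when a tag option is supplied.
    m, err := manifests.Get(ctx, "", distribution.WithTag("latest"))
    if err != nil {
        panic(err)
    }
    mediaType, payload, _ := m.Payload()
    fmt.Println(mediaType, len(payload))
}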
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
new file mode 100644
index 000000000..e5ff09d75
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -0,0 +1,251 @@
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+var (
+ contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+ // ErrWrongCodeForByteRange is returned if the client sends a request
+ // with a Range header but the server returns a 2xx or 3xx code other
+ // than 206 Partial Content.
+ ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+ return &httpReadSeeker{
+ client: client,
+ url: url,
+ errorHandler: errorHandler,
+ }
+}
+
+type httpReadSeeker struct {
+ client *http.Client
+ url string
+
+ // errorHandler creates an error from an unsuccessful HTTP response.
+ // This allows the error to be created with the HTTP response body
+ // without leaking the body through a returned error.
+ errorHandler func(*http.Response) error
+
+ size int64
+
+ // rc is the remote read closer.
+ rc io.ReadCloser
+ // readerOffset tracks the offset as of the last read.
+ readerOffset int64
+ // seekOffset allows Seek to override the offset. Seek changes
+ // seekOffset instead of changing readerOffset directly so that
+ // connection resets can be delayed and possibly avoided if the
+ // seek is undone (i.e. seeking to the end and then back to the
+ // beginning).
+ seekOffset int64
+ err error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ // If we sought to a different position, we need to reset the
+ // connection. This logic is here instead of Seek so that if
+ // a seek is undone before the next read, the connection doesn't
+ // need to be closed and reopened. A common example of this is
+ // seeking to the end to determine the length, and then seeking
+ // back to the original position.
+ if hrs.readerOffset != hrs.seekOffset {
+ hrs.reset()
+ }
+
+ hrs.readerOffset = hrs.seekOffset
+
+ rd, err := hrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ hrs.seekOffset += int64(n)
+ hrs.readerOffset += int64(n)
+
+ return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ lastReaderOffset := hrs.readerOffset
+
+ if whence == os.SEEK_SET && hrs.rc == nil {
+ // If no request has been made yet, and we are seeking to an
+ // absolute position, set the read offset as well to avoid an
+ // unnecessary request.
+ hrs.readerOffset = offset
+ }
+
+ _, err := hrs.reader()
+ if err != nil {
+ hrs.readerOffset = lastReaderOffset
+ return 0, err
+ }
+
+ newOffset := hrs.seekOffset
+
+ switch whence {
+ case os.SEEK_CUR:
+ newOffset += offset
+ case os.SEEK_END:
+ if hrs.size < 0 {
+ return 0, errors.New("content length not known")
+ }
+ newOffset = hrs.size + offset
+ case os.SEEK_SET:
+ newOffset = offset
+ }
+
+ if newOffset < 0 {
+ err = errors.New("cannot seek to negative position")
+ } else {
+ hrs.seekOffset = newOffset
+ }
+
+ return hrs.seekOffset, err
+}
+
+func (hrs *httpReadSeeker) Close() error {
+ if hrs.err != nil {
+ return hrs.err
+ }
+
+ // close and release reader chain
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ }
+
+ hrs.rc = nil
+
+ hrs.err = errors.New("httpLayer: closed")
+
+ return nil
+}
+
+func (hrs *httpReadSeeker) reset() {
+ if hrs.err != nil {
+ return
+ }
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ hrs.rc = nil
+ }
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+ if hrs.err != nil {
+ return nil, hrs.err
+ }
+
+ if hrs.rc != nil {
+ return hrs.rc, nil
+ }
+
+ req, err := http.NewRequest("GET", hrs.url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if hrs.readerOffset > 0 {
+ // If we are at different offset, issue a range request from there.
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
+ // TODO: get context in here
+ // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+ }
+
+ req.Header.Add("Accept-Encoding", "identity")
+ resp, err := hrs.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Normally would use client.SuccessStatus, but that would be a cyclic
+ // import
+ if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+ if hrs.readerOffset > 0 {
+ if resp.StatusCode != http.StatusPartialContent {
+ return nil, ErrWrongCodeForByteRange
+ }
+
+ contentRange := resp.Header.Get("Content-Range")
+ if contentRange == "" {
+ return nil, errors.New("no Content-Range header found in HTTP 206 response")
+ }
+
+ submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
+ if len(submatches) < 4 {
+ return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
+ }
+
+ startByte, err := strconv.ParseUint(submatches[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
+ }
+
+ if startByte != uint64(hrs.readerOffset) {
+ return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
+ }
+
+ endByte, err := strconv.ParseUint(submatches[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
+ }
+
+ if submatches[3] == "*" {
+ hrs.size = -1
+ } else {
+ size, err := strconv.ParseUint(submatches[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
+ }
+
+ if endByte+1 != size {
+ return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
+ }
+
+ hrs.size = int64(size)
+ }
+ } else if resp.StatusCode == http.StatusOK {
+ hrs.size = resp.ContentLength
+ } else {
+ hrs.size = -1
+ }
+ hrs.rc = resp.Body
+ } else {
+ defer resp.Body.Close()
+ if hrs.errorHandler != nil {
+ return nil, hrs.errorHandler(resp)
+ }
+ return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+ }
+
+ return hrs.rc, nil
+}
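
A minimal sketch of NewHTTPReadSeeker, assuming the blob URL is reachable and the server reports a Content-Length: seek to the end to learn the size, rewind, and read the body.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/docker/distribution/registry/client/transport"
)

func main() {
    // The URL is a placeholder; a nil error handler means unexpected
    // statuses surface as generic errors.
    rs := transport.NewHTTPReadSeeker(http.DefaultClient,
        "https://registry.example.com/v2/library/busybox/blobs/sha256:0000000000000000000000000000000000000000000000000000000000000000", nil)
    defer rs.Close()

    // Seeking to the end triggers the first request and records the size.
    size, err := rs.Seek(0, io.SeekEnd)
    if err != nil {
        panic(err)
    }
    // Seeking back to the start before reading avoids a second request.
    if _, err := rs.Seek(0, io.SeekStart); err != nil {
        panic(err)
    }

    data, err := ioutil.ReadAll(rs)
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d of %d bytes\n", len(data), size)
}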
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 000000000..30e45fab0
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+ ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+ return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+ for k, s := range http.Header(h) {
+ req.Header[k] = append(req.Header[k], s...)
+ }
+
+ return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+ return &transport{
+ Modifiers: modifiers,
+ Base: base,
+ }
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+ Modifiers []RequestModifier
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip clones the request, applies the configured request
+// modifiers to the clone, and performs the request using the base
+// RoundTripper.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req2 := cloneRequest(req)
+ for _, modifier := range t.Modifiers {
+ if err := modifier.ModifyRequest(req2); err != nil {
+ return nil, err
+ }
+ }
+
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
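
A short sketch of composing this transport with a header modifier; the bearer token value and registry URL are placeholders, not values from this diff.

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/distribution/registry/client/transport"
)

func main() {
    modifier := transport.NewHeaderRequestModifier(http.Header{
        "Authorization": []string{"Bearer <token>"},
    })

    client := &http.Client{
        // Every request sent through this client gets the extra header
        // applied to a cloned request before the base transport runs it.
        Transport: transport.NewTransport(http.DefaultTransport, modifier),
    }

    resp, err := client.Get("https://registry.example.com/v2/")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}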