Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/distribution/LICENSE | 202
-rw-r--r--  vendor/github.com/docker/distribution/README.md | 130
-rw-r--r--  vendor/github.com/docker/distribution/blobs.go | 257
-rw-r--r--  vendor/github.com/docker/distribution/context/context.go | 85
-rw-r--r--  vendor/github.com/docker/distribution/context/doc.go | 89
-rw-r--r--  vendor/github.com/docker/distribution/context/http.go | 366
-rw-r--r--  vendor/github.com/docker/distribution/context/logger.go | 116
-rw-r--r--  vendor/github.com/docker/distribution/context/trace.go | 104
-rw-r--r--  vendor/github.com/docker/distribution/context/util.go | 24
-rw-r--r--  vendor/github.com/docker/distribution/context/version.go | 16
-rw-r--r--  vendor/github.com/docker/distribution/digestset/set.go | 247
-rw-r--r--  vendor/github.com/docker/distribution/doc.go | 7
-rw-r--r--  vendor/github.com/docker/distribution/errors.go | 115
-rw-r--r--  vendor/github.com/docker/distribution/manifests.go | 125
-rw-r--r--  vendor/github.com/docker/distribution/reference/helpers.go | 42
-rw-r--r--  vendor/github.com/docker/distribution/reference/normalize.go | 170
-rw-r--r--  vendor/github.com/docker/distribution/reference/reference.go | 433
-rw-r--r--  vendor/github.com/docker/distribution/reference/regexp.go | 143
-rw-r--r--  vendor/github.com/docker/distribution/registry.go | 97
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/errors.go | 267
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/handler.go | 44
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/errcode/register.go | 138
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/descriptors.go | 1596
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/doc.go | 9
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/errors.go | 136
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/headerparser.go | 161
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/routes.go | 49
-rw-r--r--  vendor/github.com/docker/distribution/registry/api/v2/urls.go | 266
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go | 27
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go | 237
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/blob_writer.go | 162
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/errors.go | 139
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/repository.go | 853
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/http_reader.go | 251
-rw-r--r--  vendor/github.com/docker/distribution/registry/client/transport/transport.go | 147
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/cache.go | 35
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go | 101
-rw-r--r--  vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go | 179
-rw-r--r--  vendor/github.com/docker/distribution/tags.go | 27
-rw-r--r--  vendor/github.com/docker/distribution/uuid/uuid.go | 126
-rw-r--r--  vendor/github.com/docker/distribution/vendor.conf | 43
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/LICENSE | 20
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/README.md | 82
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/client/client.go | 121
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/client/command.go | 56
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go | 186
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/error.go | 102
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/helper.go | 14
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/credentials/version.go | 4
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c | 228
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go | 196
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h | 14
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go | 13
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go | 41
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c | 162
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go | 118
-rw-r--r--  vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h | 13
-rw-r--r--  vendor/github.com/docker/docker/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/docker/NOTICE | 19
-rw-r--r--  vendor/github.com/docker/docker/README.md | 90
-rw-r--r--  vendor/github.com/docker/docker/api/README.md | 42
-rw-r--r--  vendor/github.com/docker/docker/api/common.go | 65
-rw-r--r--  vendor/github.com/docker/docker/api/common_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/api/common_windows.go | 8
-rw-r--r--  vendor/github.com/docker/docker/api/names.go | 9
-rw-r--r--  vendor/github.com/docker/docker/api/types/auth.go | 22
-rw-r--r--  vendor/github.com/docker/docker/api/types/blkiodev/blkio.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 389
-rw-r--r--  vendor/github.com/docker/docker/api/types/configs.go | 70
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/config.go | 69
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_changes.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_create.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_top.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_update.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_wait.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/host_config.go | 380
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go | 41
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go | 54
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/waitcondition.go | 22
-rw-r--r--  vendor/github.com/docker/docker/api/types/error_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/events/events.go | 52
-rw-r--r--  vendor/github.com/docker/docker/api/types/filters/parse.go | 310
-rw-r--r--  vendor/github.com/docker/docker/api/types/graph_driver_data.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/id_response.go | 13
-rw-r--r--  vendor/github.com/docker/docker/api/types/image/image_history.go | 37
-rw-r--r--  vendor/github.com/docker/docker/api/types/image_delete_response_item.go | 15
-rw-r--r--  vendor/github.com/docker/docker/api/types/image_summary.go | 49
-rw-r--r--  vendor/github.com/docker/docker/api/types/mount/mount.go | 128
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/network.go | 108
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin.go | 200
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_device.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_env.go | 25
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_interface_type.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_mount.go | 37
-rw-r--r--  vendor/github.com/docker/docker/api/types/plugin_responses.go | 71
-rw-r--r--  vendor/github.com/docker/docker/api/types/port.go | 23
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/authenticate.go | 21
-rw-r--r--  vendor/github.com/docker/docker/api/types/registry/registry.go | 119
-rw-r--r--  vendor/github.com/docker/docker/api/types/seccomp.go | 93
-rw-r--r--  vendor/github.com/docker/docker/api/types/service_update_response.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/stats.go | 181
-rw-r--r--  vendor/github.com/docker/docker/api/types/strslice/strslice.go | 30
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/common.go | 40
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/config.go | 31
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/container.go | 72
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/network.go | 119
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/node.go | 115
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime.go | 19
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go | 3
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go | 712
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/secret.go | 32
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service.go | 124
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/swarm.go | 217
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/task.go | 184
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/duration_convert.go | 12
-rw-r--r--  vendor/github.com/docker/docker/api/types/time/timestamp.go | 124
-rw-r--r--  vendor/github.com/docker/docker/api/types/types.go | 575
-rw-r--r--  vendor/github.com/docker/docker/api/types/versions/README.md | 14
-rw-r--r--  vendor/github.com/docker/docker/api/types/versions/compare.go | 62
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume.go | 69
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volumes_create.go | 29
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volumes_list.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/README.md | 35
-rw-r--r--  vendor/github.com/docker/docker/client/build_prune.go | 30
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_create.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_delete.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/checkpoint_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/client.go | 314
-rw-r--r--  vendor/github.com/docker/docker/client/client_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/client/client_windows.go | 4
-rw-r--r--  vendor/github.com/docker/docker/client/config_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/config_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/config_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/config_remove.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/config_update.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/container_attach.go | 57
-rw-r--r--  vendor/github.com/docker/docker/client/container_commit.go | 55
-rw-r--r--  vendor/github.com/docker/docker/client/container_copy.go | 102
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_diff.go | 23
-rw-r--r--  vendor/github.com/docker/docker/client/container_exec.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/container_export.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/container_inspect.go | 54
-rw-r--r--  vendor/github.com/docker/docker/client/container_kill.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/container_list.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/container_logs.go | 72
-rw-r--r--  vendor/github.com/docker/docker/client/container_pause.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/container_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/container_remove.go | 27
-rw-r--r--  vendor/github.com/docker/docker/client/container_rename.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/container_resize.go | 29
-rw-r--r--  vendor/github.com/docker/docker/client/container_restart.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/container_start.go | 24
-rw-r--r--  vendor/github.com/docker/docker/client/container_stats.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/container_stop.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/container_top.go | 28
-rw-r--r--  vendor/github.com/docker/docker/client/container_unpause.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/container_update.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/container_wait.go | 84
-rw-r--r--  vendor/github.com/docker/docker/client/disk_usage.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/distribution_inspect.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 300
-rw-r--r--  vendor/github.com/docker/docker/client/events.go | 102
-rw-r--r--  vendor/github.com/docker/docker/client/hijack.go | 208
-rw-r--r--  vendor/github.com/docker/docker/client/image_build.go | 128
-rw-r--r--  vendor/github.com/docker/docker/client/image_create.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/image_history.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/image_import.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/image_inspect.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/image_list.go | 45
-rw-r--r--  vendor/github.com/docker/docker/client/image_load.go | 30
-rw-r--r--  vendor/github.com/docker/docker/client/image_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/image_pull.go | 61
-rw-r--r--  vendor/github.com/docker/docker/client/image_push.go | 56
-rw-r--r--  vendor/github.com/docker/docker/client/image_remove.go | 31
-rw-r--r--  vendor/github.com/docker/docker/client/image_save.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/image_search.go | 51
-rw-r--r--  vendor/github.com/docker/docker/client/image_tag.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/info.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/interface.go | 194
-rw-r--r--  vendor/github.com/docker/docker/client/interface_experimental.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/interface_stable.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/login.go | 29
-rw-r--r--  vendor/github.com/docker/docker/client/network_connect.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/network_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/network_disconnect.go | 14
-rw-r--r--  vendor/github.com/docker/docker/client/network_inspect.go | 50
-rw-r--r--  vendor/github.com/docker/docker/client/network_list.go | 31
-rw-r--r--  vendor/github.com/docker/docker/client/network_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/network_remove.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/node_inspect.go | 33
-rw-r--r--  vendor/github.com/docker/docker/client/node_list.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/node_remove.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/node_update.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/parse_logs.go | 41
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_create.go | 26
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_disable.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_enable.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_inspect.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_install.go | 113
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_push.go | 17
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_remove.go | 20
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_set.go | 12
-rw-r--r--  vendor/github.com/docker/docker/client/plugin_upgrade.go | 39
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 262
-rw-r--r--  vendor/github.com/docker/docker/client/secret_create.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/secret_inspect.go | 37
-rw-r--r--  vendor/github.com/docker/docker/client/secret_list.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/secret_remove.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/secret_update.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 156
-rw-r--r--  vendor/github.com/docker/docker/client/service_inspect.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/service_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/service_logs.go | 52
-rw-r--r--  vendor/github.com/docker/docker/client/service_remove.go | 10
-rw-r--r--  vendor/github.com/docker/docker/client/service_update.go | 92
-rw-r--r--  vendor/github.com/docker/docker/client/session.go | 19
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_get_unlock_key.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_init.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_inspect.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_join.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_leave.go | 18
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_unlock.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/swarm_update.go | 22
-rw-r--r--  vendor/github.com/docker/docker/client/task_inspect.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/task_list.go | 35
-rw-r--r--  vendor/github.com/docker/docker/client/task_logs.go | 52
-rw-r--r--  vendor/github.com/docker/docker/client/transport.go | 25
-rw-r--r--  vendor/github.com/docker/docker/client/utils.go | 34
-rw-r--r--  vendor/github.com/docker/docker/client/version.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_create.go | 21
-rw-r--r--  vendor/github.com/docker/docker/client/volume_inspect.go | 38
-rw-r--r--  vendor/github.com/docker/docker/client/volume_list.go | 32
-rw-r--r--  vendor/github.com/docker/docker/client/volume_prune.go | 36
-rw-r--r--  vendor/github.com/docker/docker/client/volume_remove.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/README.md | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go | 23
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_others.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go | 34
-rw-r--r--  vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools.go | 279
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 204
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go | 25
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go | 164
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/utils_unix.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/buffer.go | 51
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go | 186
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/fswriters.go | 162
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/readers.go | 154
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go | 18
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go | 92
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writers.go | 66
-rw-r--r--  vendor/github.com/docker/docker/pkg/longpath/longpath.go | 26
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags.go | 149
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go | 49
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_linux.go | 87
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go | 31
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mount.go | 86
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go | 60
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_linux.go | 57
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo.go | 54
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go | 95
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go | 69
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go | 58
-rw-r--r--  vendor/github.com/docker/docker/pkg/pools/pools.go | 137
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal.go | 54
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_darwin.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go | 43
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_linux.go | 82
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_solaris.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_unix.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/signal_windows.go | 28
-rw-r--r--  vendor/github.com/docker/docker/pkg/signal/trap.go | 104
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringid/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringid/stringid.go | 99
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringutils/README.md | 1
-rw-r--r--  vendor/github.com/docker/docker/pkg/stringutils/stringutils.go | 99
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE | 191
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD | 27
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/README.md | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs.go | 144
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs_unix.go | 15
-rw-r--r--  vendor/github.com/docker/docker/pkg/symlink/fs_windows.go | 169
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes.go | 35
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes_unix.go | 14
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/chtimes_windows.go | 28
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/errors.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/events_windows.go | 85
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/exitcode.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/filesys.go | 67
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/filesys_windows.go | 298
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_windows.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_unix.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lstat_unix.go | 19
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lstat_windows.go | 14
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_linux.go | 65
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go | 129
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/meminfo_windows.go | 45
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/mknod.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/mknod_windows.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_unix.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/process_unix.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/rm.go | 80
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_darwin.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_freebsd.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_linux.go | 19
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_openbsd.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_solaris.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_unix.go | 60
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_windows.go | 49
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_unix.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_windows.go | 122
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/umask.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/umask_windows.go | 9
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go | 24
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_linux.go | 25
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/xattrs_linux.go | 29
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/ascii.go | 66
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/proxy.go | 74
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/tc.go | 21
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go | 65
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term.go | 124
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term_windows.go | 237
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_bsd.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_linux.go | 37
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go | 263
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go | 64
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/console.go | 35
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/windows.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/winsize.go | 30
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go | 33
-rw-r--r--  vendor/github.com/docker/docker/pkg/truncindex/truncindex.go | 139
-rw-r--r--  vendor/github.com/docker/docker/vendor.conf | 147
-rw-r--r--  vendor/github.com/docker/go-connections/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/go-connections/README.md | 13
-rw-r--r--  vendor/github.com/docker/go-connections/nat/nat.go | 242
-rw-r--r--  vendor/github.com/docker/go-connections/nat/parse.go | 57
-rw-r--r--  vendor/github.com/docker/go-connections/nat/sort.go | 96
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/README.md | 0
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/inmem_socket.go | 81
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/proxy.go | 51
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets.go | 38
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets_unix.go | 35
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/sockets_windows.go | 27
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/tcp_socket.go | 22
-rw-r--r--  vendor/github.com/docker/go-connections/sockets/unix_socket.go | 32
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go | 18
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go | 14
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config.go | 244
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go | 17
-rw-r--r--  vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go | 15
-rw-r--r--  vendor/github.com/docker/go-units/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/go-units/README.md | 16
-rw-r--r--  vendor/github.com/docker/go-units/duration.go | 33
-rw-r--r--  vendor/github.com/docker/go-units/size.go | 96
-rw-r--r--  vendor/github.com/docker/go-units/ulimit.go | 118
-rw-r--r--  vendor/github.com/docker/libtrust/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/libtrust/README.md | 22
-rw-r--r--  vendor/github.com/docker/libtrust/certificates.go | 175
-rw-r--r--  vendor/github.com/docker/libtrust/doc.go | 9
-rw-r--r--  vendor/github.com/docker/libtrust/ec_key.go | 428
-rw-r--r--  vendor/github.com/docker/libtrust/filter.go | 50
-rw-r--r--  vendor/github.com/docker/libtrust/hash.go | 56
-rw-r--r--  vendor/github.com/docker/libtrust/jsonsign.go | 657
-rw-r--r--  vendor/github.com/docker/libtrust/key.go | 253
-rw-r--r--  vendor/github.com/docker/libtrust/key_files.go | 255
-rw-r--r--  vendor/github.com/docker/libtrust/key_manager.go | 175
-rw-r--r--  vendor/github.com/docker/libtrust/rsa_key.go | 427
-rw-r--r--  vendor/github.com/docker/libtrust/util.go | 363
-rw-r--r--  vendor/github.com/docker/spdystream/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/spdystream/LICENSE.docs | 425
-rw-r--r--  vendor/github.com/docker/spdystream/README.md | 77
-rw-r--r--  vendor/github.com/docker/spdystream/connection.go | 959
-rw-r--r--  vendor/github.com/docker/spdystream/handlers.go | 38
-rw-r--r--  vendor/github.com/docker/spdystream/priority.go | 98
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/dictionary.go | 187
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/read.go | 348
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/types.go | 275
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/write.go | 318
-rw-r--r--  vendor/github.com/docker/spdystream/stream.go | 327
-rw-r--r--  vendor/github.com/docker/spdystream/utils.go | 16
404 files changed, 34801 insertions, 0 deletions
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 000000000..e06d20818
--- /dev/null
+++ b/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
new file mode 100644
index 000000000..998878850
--- /dev/null
+++ b/vendor/github.com/docker/distribution/README.md
@@ -0,0 +1,130 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused on security and performance.
+
+<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component** |Description |
+|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
+| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
+| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of the V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without it.
+
+Our goal is to design a professional-grade, extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure, and reliable way to store, manage, package, and
+ exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own homemade solutions through good specs and a solid
+ extension mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+<table>
+<tr>
+ <th align="left">
+ IRC
+ </th>
+ <td>
+ #docker-distribution on FreeNode
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Issue Tracker
+ </th>
+ <td>
+ github.com/docker/distribution/issues
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Google Groups
+ </th>
+ <td>
+ https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+ </td>
+</tr>
+<tr>
+ <th align="left">
+ Mailing List
+ </th>
+ <td>
+ docker@dockerproject.org
+ </td>
+</tr>
+</table>
+
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
new file mode 100644
index 000000000..01d309029
--- /dev/null
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrBlobExists returned when blob already exists
+ ErrBlobExists = errors.New("blob exists")
+
+ // ErrBlobDigestUnsupported when blob digest is an unsupported version.
+ ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+ // ErrBlobUnknown when blob is not found.
+ ErrBlobUnknown = errors.New("unknown blob")
+
+ // ErrBlobUploadUnknown returned when upload is not found.
+ ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+ // ErrBlobInvalidLength returned when the blob has an invalid length on
+ // commit, either mismatched with the descriptor or an invalid value.
+ ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+ Digest digest.Digest
+ Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+ return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+ err.Digest, err.Reason)
+}
+
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+ From reference.Canonical
+ Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+ return fmt.Sprintf("blob mounted from: %v to: %v",
+ err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+ // MediaType describes the type of the content. All text-based formats are
+ // encoded as UTF-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // NOTE: Before adding a field here, please ensure that all
+ // other options have been exhausted. Much of the type relationships
+ // depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+ return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+ // Stat provides metadata about a blob identified by the digest. If the
+ // blob is unknown to the describer, ErrBlobUnknown will be returned.
+ Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage
+type BlobEnumerator interface {
+ Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+ BlobStatter
+
+ // SetDescriptor assigns the descriptor to the digest. The provided digest and
+ // the digest in the descriptor must map to identical content but they may
+ // differ on their algorithm. The descriptor must have the canonical
+ // digest of the content and the digest algorithm must match the
+ // annotator's canonical algorithm.
+ //
+ // Such a facility can be used to map blobs between digest domains, with
+ // the restriction that the algorithm of the descriptor must match the
+ // canonical algorithm (i.e. sha256) of the annotator.
+ SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+ // Clear enables descriptors to be unlinked
+ Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+ BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+ // Get returns the entire blob identified by digest along with the descriptor.
+ Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+ // Open provides a ReadSeekCloser to the blob identified by the provided
+ // descriptor. If the blob is not known to the service, an error will be
+ // returned.
+ Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+ // ServeBlob attempts to serve the blob, identified by dgst, via http. The
+ // service may decide to redirect the client elsewhere or serve the data
+ // directly.
+ //
+ // This handler only issues successful responses, such as 2xx or 3xx,
+ // meaning it serves data or issues a redirect. If the blob is not
+ // available, an error will be returned and the caller may still issue a
+ // response.
+ //
+ // The implementation may serve the same blob from a different digest
+ // domain. The appropriate headers will be set for the blob, unless they
+ // have already been set by the caller.
+ ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+ // Put inserts the content p into the blob service, returning a descriptor
+ // or an error.
+ Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+ // Create allocates a new blob writer to add a blob to this service. The
+ // returned handle can be written to and later resumed using an opaque
+ // identifier. With this approach, one can Close and Resume a BlobWriter
+ // multiple times until the BlobWriter is committed or cancelled.
+ Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+ // Resume attempts to resume a write to a blob, identified by an id.
+ Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+ Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+ Mount struct {
+ ShouldMount bool
+ From reference.Canonical
+ // Stat allows passing a precalculated descriptor to link and return.
+ // Blob access check will be skipped if set.
+ Stat *Descriptor
+ }
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+ io.WriteCloser
+ io.ReaderFrom
+
+ // Size returns the number of bytes written to this blob.
+ Size() int64
+
+ // ID returns the identifier for this writer. The ID can be used with the
+ // Blob service to later resume the write.
+ ID() string
+
+ // StartedAt returns the time this blob write was started.
+ StartedAt() time.Time
+
+ // Commit completes the blob writer process. The content is verified
+ // against the provided provisional descriptor, which may result in an
+ // error. Depending on the implementation, written data may be validated
+ // against the provisional descriptor fields. If MediaType is not present,
+ // the implementation may reject the commit or assign "application/octet-
+ // stream" to the blob. The returned descriptor may have a different
+ // digest depending on the blob store, referred to as the canonical
+ // descriptor.
+ Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+ // Cancel ends the blob write without storing any data and frees any
+ // associated resources. Any data written thus far will be lost. Cancel
+ // implementations should allow multiple calls even after a commit that
+ // result in a no-op. This allows use of Cancel in a defer statement,
+ // increasing the assurance that it is correctly called.
+ Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+ BlobStatter
+ BlobProvider
+ BlobIngester
+}
+
+// BlobStore represents the entire suite of blob-related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+ BlobService
+ BlobServer
+ BlobDeleter
+}
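The interfaces above define the complete blob write path: Create a BlobWriter, stream data into it, then Commit against a provisional descriptor. A minimal sketch of that flow, assuming some concrete distribution.BlobStore implementation is at hand (the uploadBlob helper and its payload are hypothetical, for illustration only):

package blobexample

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/opencontainers/go-digest"
)

// uploadBlob is a hypothetical helper showing the Create -> Write -> Commit
// flow defined by BlobIngester and BlobWriter above.
func uploadBlob(ctx context.Context, blobs distribution.BlobStore, payload []byte) (distribution.Descriptor, error) {
	bw, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Cancel after a successful Commit is documented above as a no-op,
	// so deferring it releases resources on every error path.
	defer bw.Cancel(ctx)

	if _, err := bw.Write(payload); err != nil {
		return distribution.Descriptor{}, err
	}

	// Commit verifies the written bytes against this provisional descriptor
	// and returns the canonical descriptor on success.
	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	})
}

Per the BlobIngester contract, the same upload could instead be closed and later picked back up with blobs.Resume(ctx, bw.ID()).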
diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go
new file mode 100644
index 000000000..23cbf5b54
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/context.go
@@ -0,0 +1,85 @@
+package context
+
+import (
+ "sync"
+
+ "github.com/docker/distribution/uuid"
+ "golang.org/x/net/context"
+)
+
+// Context is a copy of Context from the golang.org/x/net/context package.
+type Context interface {
+ context.Context
+}
+
+// instanceContext is a context that provides only an instance id. It is
+// provided as the main background context.
+type instanceContext struct {
+ Context
+ id string // id of context, logged as "instance.id"
+ once sync.Once // once protects generation of the id
+}
+
+func (ic *instanceContext) Value(key interface{}) interface{} {
+ if key == "instance.id" {
+ ic.once.Do(func() {
+ // We want to lazily initialize the UUID so that we don't
+ // call a random generator from the package initialization
+ // code. For various reasons, random may not be available:
+ // https://github.com/docker/distribution/issues/782
+ ic.id = uuid.Generate().String()
+ })
+ return ic.id
+ }
+
+ return ic.Context.Value(key)
+}
+
+var background = &instanceContext{
+ Context: context.Background(),
+}
+
+// Background returns a non-nil, empty Context. The background context
+// provides a single key, "instance.id" that is globally unique to the
+// process.
+func Background() Context {
+ return background
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val. Use context Values only for request-scoped data that transits processes
+// and APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
+
+// stringMapContext is a simple context implementation that checks a map for a
+// key, falling back to a parent if not present.
+type stringMapContext struct {
+ context.Context
+ m map[string]interface{}
+}
+
+// WithValues returns a context that proxies lookups through a map. Only
+// supports string keys.
+func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
+ mo := make(map[string]interface{}, len(m)) // make our own copy.
+ for k, v := range m {
+ mo[k] = v
+ }
+
+ return stringMapContext{
+ Context: ctx,
+ m: mo,
+ }
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+ if ks, ok := key.(string); ok {
+ if v, ok := smc.m[ks]; ok {
+ return v
+ }
+ }
+
+ return smc.Context.Value(key)
+}
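
As a quick sketch of how the pieces in context.go compose: map values from WithValues shadow the parent, and the background context serves "instance.id" lazily:

```go
package example

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func demoValues() {
	ctx := dcontext.WithValues(dcontext.Background(), map[string]interface{}{
		"environment": "production",
	})

	// Map keys are checked first, then lookup falls through to the parent.
	fmt.Println(ctx.Value("environment")) // "production"

	// The background context lazily generates a process-unique instance id.
	fmt.Println(ctx.Value("instance.id"))
}
```
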
diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go
new file mode 100644
index 000000000..9b623074e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/doc.go
@@ -0,0 +1,89 @@
+// Package context provides several utilities for working with
+// golang.org/x/net/context in http requests. Primarily, the focus is on
+// logging relevant request information but this package is not limited to
+// that purpose.
+//
+// The easiest way to get started is to get the background context:
+//
+// ctx := context.Background()
+//
+// The returned context should be passed around your application and be the
+// root of all other context instances. If the application has a version, this
+// line should be called before anything else:
+//
+// ctx := context.WithVersion(context.Background(), version)
+//
+// The above will store the version in the context and will be available to
+// the logger.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+// GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+// ctx := context.WithValue(context.Background(), "version", version)
+// GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+// GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id. Using the
+// context above, in our request scoped function, we place another logger in
+// the context:
+//
+// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context; the previous context, with
+// just the version field, can still be used independently. Put another way,
+// the new logger, added to the request context, is unique to that context
+// and can carry request-scoped variables.
+//
+// HTTP Requests
+//
+// This package also contains several methods for working with http requests.
+// The concepts are very similar to those described above. We simply place the
+// request in the context using WithRequest. This makes the request variables
+// available. GetRequestLogger can then be called to get request specific
+// variables in a log line:
+//
+// ctx = WithRequest(ctx, req)
+// GetRequestLogger(ctx).Infof("request variables")
+//
+// Like above, if we want to include the request data in all log messages in
+// the context, we push the logger to a new context and use that one:
+//
+// ctx = WithLogger(ctx, GetRequestLogger(ctx))
+//
+// The concept is fairly powerful and ensures that calls throughout the stack
+// can be traced in log messages. Using the fields like "http.request.id", one
+// can analyze call flow for a particular request with a simple grep of the
+// logs.
+package context
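
Condensing the package comment into one runnable sketch (the version string and request id here are illustrative):

```go
package example

import dcontext "github.com/docker/distribution/context"

func run() {
	// Root context; WithVersion also pushes a version-aware logger.
	ctx := dcontext.WithVersion(dcontext.Background(), "v2.0.0")

	// Request scope: stack another logger carrying the request id.
	ctx = dcontext.WithValue(ctx, "http.request.id", "unique id")
	ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, "http.request.id"))

	// Emits both the version and http.request.id fields.
	dcontext.GetLogger(ctx).Infof("this log message has both fields")
}
```
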
diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go
new file mode 100644
index 000000000..7d65c8524
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/http.go
@@ -0,0 +1,366 @@
+package context
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/docker/distribution/uuid"
+ "github.com/gorilla/mux"
+ log "github.com/sirupsen/logrus"
+)
+
+// Common errors used with this package.
+var (
+ ErrNoRequestContext = errors.New("no http request in context")
+ ErrNoResponseWriterContext = errors.New("no http response in context")
+)
+
+func parseIP(ipStr string) net.IP {
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ log.Warnf("invalid remote IP address: %q", ipStr)
+ }
+ return ip
+}
+
+// RemoteAddr extracts the remote address of the request, taking into
+// account proxy headers.
+func RemoteAddr(r *http.Request) string {
+ if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
+ proxies := strings.Split(prior, ",")
+ if len(proxies) > 0 {
+ remoteAddr := strings.Trim(proxies[0], " ")
+ if parseIP(remoteAddr) != nil {
+ return remoteAddr
+ }
+ }
+ }
+ // X-Real-Ip is less supported, but worth checking in the
+ // absence of X-Forwarded-For
+ if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
+ if parseIP(realIP) != nil {
+ return realIP
+ }
+ }
+
+ return r.RemoteAddr
+}
+
+// RemoteIP extracts the remote IP of the request, taking into
+// account proxy headers.
+func RemoteIP(r *http.Request) string {
+ addr := RemoteAddr(r)
+
+ // Try parsing it as "IP:port"
+ if ip, _, err := net.SplitHostPort(addr); err == nil {
+ return ip
+ }
+
+ return addr
+}
+
+// WithRequest places the request on the context. The context of the request
+// is assigned a unique id, available at "http.request.id". The request itself
+// is available at "http.request". Other common attributes are available under
+// the prefix "http.request.". If a request is already present on the context,
+// this method will panic.
+func WithRequest(ctx Context, r *http.Request) Context {
+ if ctx.Value("http.request") != nil {
+ // NOTE(stevvooe): This needs to be considered a programming error. It
+ // is unlikely that we'd want to have more than one request in
+ // context.
+ panic("only one request per context")
+ }
+
+ return &httpRequestContext{
+ Context: ctx,
+ startedAt: time.Now(),
+ id: uuid.Generate().String(),
+ r: r,
+ }
+}
+
+// GetRequest returns the http request in the given context. Returns
+// ErrNoRequestContext if the context does not have an http request associated
+// with it.
+func GetRequest(ctx Context) (*http.Request, error) {
+ if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
+ return r, nil
+ }
+ return nil, ErrNoRequestContext
+}
+
+// GetRequestID attempts to resolve the current request id, if possible. An
+// empty string is returned if it is not available on the context.
+func GetRequestID(ctx Context) string {
+ return GetStringValue(ctx, "http.request.id")
+}
+
+// WithResponseWriter returns a new context and response writer that makes
+// interesting response statistics available within the context.
+func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
+ if closeNotifier, ok := w.(http.CloseNotifier); ok {
+ irwCN := &instrumentedResponseWriterCN{
+ instrumentedResponseWriter: instrumentedResponseWriter{
+ ResponseWriter: w,
+ Context: ctx,
+ },
+ CloseNotifier: closeNotifier,
+ }
+
+ return irwCN, irwCN
+ }
+
+ irw := instrumentedResponseWriter{
+ ResponseWriter: w,
+ Context: ctx,
+ }
+ return &irw, &irw
+}
+
+// GetResponseWriter returns the http.ResponseWriter from the provided
+// context. If not present, ErrNoResponseWriterContext is returned. The
+// returned instance provides instrumentation in the context.
+func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
+ v := ctx.Value("http.response")
+
+ rw, ok := v.(http.ResponseWriter)
+ if !ok || rw == nil {
+ return nil, ErrNoResponseWriterContext
+ }
+
+ return rw, nil
+}
+
+// getVarsFromRequest lets us change the request vars implementation for
+// testing and possible future changes.
+var getVarsFromRequest = mux.Vars
+
+// WithVars extracts gorilla/mux vars and makes them available on the returned
+// context. Variables are available at keys with the prefix "vars.". For
+// example, if looking for the variable "name", it can be accessed as
+// "vars.name". Implementations that are accessing values need not know that
+// the underlying context is implemented with gorilla/mux vars.
+func WithVars(ctx Context, r *http.Request) Context {
+ return &muxVarsContext{
+ Context: ctx,
+ vars: getVarsFromRequest(r),
+ }
+}
+
+// GetRequestLogger returns a logger that contains fields from the request in
+// the current context. If the request is not available in the context, no
+// fields will display. Request loggers can safely be pushed onto the context.
+func GetRequestLogger(ctx Context) Logger {
+ return GetLogger(ctx,
+ "http.request.id",
+ "http.request.method",
+ "http.request.host",
+ "http.request.uri",
+ "http.request.referer",
+ "http.request.useragent",
+ "http.request.remoteaddr",
+ "http.request.contenttype")
+}
+
+// GetResponseLogger reads the current response stats and builds a logger.
+// Because the values are read at call time, pushing a logger returned from
+// this function on the context will lead to missing or invalid data. Only
+// call this at the end of a request, after the response has been written.
+func GetResponseLogger(ctx Context) Logger {
+ l := getLogrusLogger(ctx,
+ "http.response.written",
+ "http.response.status",
+ "http.response.contenttype")
+
+ duration := Since(ctx, "http.request.startedat")
+
+ if duration > 0 {
+ l = l.WithField("http.response.duration", duration.String())
+ }
+
+ return l
+}
+
+// httpRequestContext makes information about a request available to context.
+type httpRequestContext struct {
+ Context
+
+ startedAt time.Time
+ id string
+ r *http.Request
+}
+
+// Value returns a keyed element of the request for use in the context. To get
+// the request itself, query "http.request". For other components, access them
+// as "http.request.<component>"; for example, "http.request.uri" yields
+// r.RequestURI.
+func (ctx *httpRequestContext) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.request" {
+ return ctx.r
+ }
+
+ if !strings.HasPrefix(keyStr, "http.request.") {
+ goto fallback
+ }
+
+ parts := strings.Split(keyStr, ".")
+
+ if len(parts) != 3 {
+ goto fallback
+ }
+
+ switch parts[2] {
+ case "uri":
+ return ctx.r.RequestURI
+ case "remoteaddr":
+ return RemoteAddr(ctx.r)
+ case "method":
+ return ctx.r.Method
+ case "host":
+ return ctx.r.Host
+ case "referer":
+ referer := ctx.r.Referer()
+ if referer != "" {
+ return referer
+ }
+ case "useragent":
+ return ctx.r.UserAgent()
+ case "id":
+ return ctx.id
+ case "startedat":
+ return ctx.startedAt
+ case "contenttype":
+ ct := ctx.r.Header.Get("Content-Type")
+ if ct != "" {
+ return ct
+ }
+ }
+ }
+
+fallback:
+ return ctx.Context.Value(key)
+}
+
+type muxVarsContext struct {
+ Context
+ vars map[string]string
+}
+
+func (ctx *muxVarsContext) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "vars" {
+ return ctx.vars
+ }
+
+ if strings.HasPrefix(keyStr, "vars.") {
+ keyStr = strings.TrimPrefix(keyStr, "vars.")
+ }
+
+ if v, ok := ctx.vars[keyStr]; ok {
+ return v
+ }
+ }
+
+ return ctx.Context.Value(key)
+}
+
+// instrumentedResponseWriterCN provides response writer information in a
+// context. It implements http.CloseNotifier so that users can detect
+// early disconnects.
+type instrumentedResponseWriterCN struct {
+ instrumentedResponseWriter
+ http.CloseNotifier
+}
+
+// instrumentedResponseWriter provides response writer information in a
+// context. This variant is only used in the case where CloseNotifier is not
+// implemented by the parent ResponseWriter.
+type instrumentedResponseWriter struct {
+ http.ResponseWriter
+ Context
+
+ mu sync.Mutex
+ status int
+ written int64
+}
+
+func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
+ n, err = irw.ResponseWriter.Write(p)
+
+ irw.mu.Lock()
+ irw.written += int64(n)
+
+ // Guess the likely status if not set.
+ if irw.status == 0 {
+ irw.status = http.StatusOK
+ }
+
+ irw.mu.Unlock()
+
+ return
+}
+
+func (irw *instrumentedResponseWriter) WriteHeader(status int) {
+ irw.ResponseWriter.WriteHeader(status)
+
+ irw.mu.Lock()
+ irw.status = status
+ irw.mu.Unlock()
+}
+
+func (irw *instrumentedResponseWriter) Flush() {
+ if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
+ flusher.Flush()
+ }
+}
+
+func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.response" {
+ return irw
+ }
+
+ if !strings.HasPrefix(keyStr, "http.response.") {
+ goto fallback
+ }
+
+ parts := strings.Split(keyStr, ".")
+
+ if len(parts) != 3 {
+ goto fallback
+ }
+
+ irw.mu.Lock()
+ defer irw.mu.Unlock()
+
+ switch parts[2] {
+ case "written":
+ return irw.written
+ case "status":
+ return irw.status
+ case "contenttype":
+ contentType := irw.Header().Get("Content-Type")
+ if contentType != "" {
+ return contentType
+ }
+ }
+ }
+
+fallback:
+ return irw.Context.Value(key)
+}
+
+func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
+ if keyStr, ok := key.(string); ok {
+ if keyStr == "http.response" {
+ return irw
+ }
+ }
+
+ return irw.instrumentedResponseWriter.Value(key)
+}
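
A hedged sketch of wiring http.go into middleware; `instrument` is a hypothetical wrapper, not part of this file:

```go
package example

import (
	"net/http"

	dcontext "github.com/docker/distribution/context"
)

// instrument wraps h so that request and response details are available to
// loggers further down the stack.
func instrument(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := dcontext.WithRequest(dcontext.Background(), r)
		ctx, w = dcontext.WithResponseWriter(ctx, w)
		ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx))

		h.ServeHTTP(w, r)

		// Response stats are read at call time, so log after serving.
		dcontext.GetResponseLogger(ctx).Infof("request completed")
	})
}
```
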
diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go
new file mode 100644
index 000000000..86c5964e4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/logger.go
@@ -0,0 +1,116 @@
+package context
+
+import (
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+ "runtime"
+)
+
+// Logger provides a leveled-logging interface.
+type Logger interface {
+ // standard logger methods
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+
+ Panic(args ...interface{})
+ Panicf(format string, args ...interface{})
+ Panicln(args ...interface{})
+
+ // Leveled methods, from logrus
+ Debug(args ...interface{})
+ Debugf(format string, args ...interface{})
+ Debugln(args ...interface{})
+
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Errorln(args ...interface{})
+
+ Info(args ...interface{})
+ Infof(format string, args ...interface{})
+ Infoln(args ...interface{})
+
+ Warn(args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warnln(args ...interface{})
+}
+
+// WithLogger creates a new context with provided logger.
+func WithLogger(ctx Context, logger Logger) Context {
+ return WithValue(ctx, "logger", logger)
+}
+
+// GetLoggerWithField returns a logger instance with the specified field key
+// and value without affecting the context. Extra specified keys will be
+// resolved from the context.
+func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
+}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
+ // must convert from interface{} -> interface{} to string -> interface{} for logrus.
+ lfields := make(logrus.Fields, len(fields))
+ for key, value := range fields {
+ lfields[fmt.Sprint(key)] = value
+ }
+
+ return getLogrusLogger(ctx, keys...).WithFields(lfields)
+}
+
+// GetLogger returns the logger from the current context, if present. If one
+// or more keys are provided, they will be resolved on the context and
+// included in the logger. While context.Value takes an interface, any key
+// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
+// a logging key field. If context keys are integer constants, for example,
+// it's recommended that a String method be implemented.
+func GetLogger(ctx Context, keys ...interface{}) Logger {
+ return getLogrusLogger(ctx, keys...)
+}
+
+// getLogrusLogger returns the logrus logger for the context. If one or more
+// keys are provided, they will be resolved on the context and included in the
+// logger. Only use this function if specific logrus functionality is
+// required.
+func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
+ var logger *logrus.Entry
+
+ // Get a logger, if it is present.
+ loggerInterface := ctx.Value("logger")
+ if loggerInterface != nil {
+ if lgr, ok := loggerInterface.(*logrus.Entry); ok {
+ logger = lgr
+ }
+ }
+
+ if logger == nil {
+ fields := logrus.Fields{}
+
+ // Fill in the instance id, if we have it.
+ instanceID := ctx.Value("instance.id")
+ if instanceID != nil {
+ fields["instance.id"] = instanceID
+ }
+
+ fields["go.version"] = runtime.Version()
+ // If no logger is found, just return the standard logger.
+ logger = logrus.StandardLogger().WithFields(fields)
+ }
+
+ fields := logrus.Fields{}
+ for _, key := range keys {
+ v := ctx.Value(key)
+ if v != nil {
+ fields[fmt.Sprint(key)] = v
+ }
+ }
+
+ return logger.WithFields(fields)
+}
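
A small sketch of the key-resolution behavior: keys passed to GetLogger are looked up on the context and attached as fields (key names rendered via fmt.Sprint); the key here is hypothetical:

```go
package example

import dcontext "github.com/docker/distribution/context"

func logWithKey() {
	ctx := dcontext.WithValue(dcontext.Background(), "worker.id", 7)

	// "worker.id" is resolved on the context and attached as a logger field.
	dcontext.GetLogger(ctx, "worker.id").Warnf("queue backlog growing")
}
```
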
diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go
new file mode 100644
index 000000000..721964a84
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/trace.go
@@ -0,0 +1,104 @@
+package context
+
+import (
+ "runtime"
+ "time"
+
+ "github.com/docker/distribution/uuid"
+)
+
+// WithTrace allocates a traced timing span in a new context. This allows a
+// caller to track the time between calling WithTrace and the returned done
+// function. When the done function is called, a log message is emitted with a
+// "trace.duration" field, corresponding to the elapsed time and a
+// "trace.func" field, corresponding to the function that called WithTrace.
+//
+// The logging keys "trace.id" and "trace.parent.id" are provided to implement
+// dapper-like tracing. This function should be complemented with a WithSpan
+// method that could be used for tracing distributed RPC calls.
+//
+// The main benefit of this function is to post-process log messages or
+// intercept them in a hook to provide timing data. Trace ids and parent ids
+// can also be linked to provide call tracing, if so required.
+//
+// Here is an example of the usage:
+//
+// func timedOperation(ctx Context) {
+// ctx, done := WithTrace(ctx)
+// defer done("this will be the log message")
+// // ... function body ...
+// }
+//
+// If the function ran for roughly 1s, such a usage would emit a log message
+// as follows:
+//
+// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.timedOperation trace.id=<id> ...
+//
+// Notice that the function name is automatically resolved, along with the
+// package, and a trace id is emitted that can be linked with parent ids.
+func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
+ if ctx == nil {
+ ctx = Background()
+ }
+
+ pc, file, line, _ := runtime.Caller(1)
+ f := runtime.FuncForPC(pc)
+ ctx = &traced{
+ Context: ctx,
+ id: uuid.Generate().String(),
+ start: time.Now(),
+ parent: GetStringValue(ctx, "trace.id"),
+ fnname: f.Name(),
+ file: file,
+ line: line,
+ }
+
+ return ctx, func(format string, a ...interface{}) {
+ GetLogger(ctx,
+ "trace.duration",
+ "trace.id",
+ "trace.parent.id",
+ "trace.func",
+ "trace.file",
+ "trace.line").
+ Debugf(format, a...)
+ }
+}
+
+// traced represents a context that is traced for function call timing. It
+// also provides fast lookup for the various attributes that are available on
+// the trace.
+type traced struct {
+ Context
+ id string
+ parent string
+ start time.Time
+ fnname string
+ file string
+ line int
+}
+
+func (ts *traced) Value(key interface{}) interface{} {
+ switch key {
+ case "trace.start":
+ return ts.start
+ case "trace.duration":
+ return time.Since(ts.start)
+ case "trace.id":
+ return ts.id
+ case "trace.parent.id":
+ if ts.parent == "" {
+ return nil // must return nil to signal no parent.
+ }
+
+ return ts.parent
+ case "trace.func":
+ return ts.fnname
+ case "trace.file":
+ return ts.file
+ case "trace.line":
+ return ts.line
+ }
+
+ return ts.Context.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go
new file mode 100644
index 000000000..cb9ef52e3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/util.go
@@ -0,0 +1,24 @@
+package context
+
+import (
+ "time"
+)
+
+// Since looks up key, which should be a time.Time, and returns the duration
+// since that time. If the key is not found, the value returned will be zero.
+// This is helpful when inferring metrics related to context execution times.
+func Since(ctx Context, key interface{}) time.Duration {
+ if startedAt, ok := ctx.Value(key).(time.Time); ok {
+ return time.Since(startedAt)
+ }
+ return 0
+}
+
+// GetStringValue returns a string value from the context. The empty string
+// will be returned if not found.
+func GetStringValue(ctx Context, key interface{}) (value string) {
+ if valuev, ok := ctx.Value(key).(string); ok {
+ value = valuev
+ }
+ return value
+}
diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go
new file mode 100644
index 000000000..746cda02e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/version.go
@@ -0,0 +1,16 @@
+package context
+
+// WithVersion stores the application version in the context. The new context
+// gets a logger to ensure log messages are marked with the application
+// version.
+func WithVersion(ctx Context, version string) Context {
+ ctx = WithValue(ctx, "version", version)
+ // push a new logger onto the stack
+ return WithLogger(ctx, GetLogger(ctx, "version"))
+}
+
+// GetVersion returns the application version from the context. An empty
+// string may be returned if the version was not set on the context.
+func GetVersion(ctx Context) string {
+ return GetStringValue(ctx, "version")
+}
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 000000000..71327dca7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as a short representation.
+// The uniqueness of the short representation is based on the other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected; therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (digest.Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg digest.Algorithm
+ hex string
+ )
+ dgst, err := digest.Parse(d)
+ if err == digest.ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An err will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is past the end or the value at idx is not this digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []digest.Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]digest.Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length parameter is the minimum length; a short code may be as long as
+// the entire digest value if uniqueness cannot be achieved otherwise.
+// This function will attempt to make short codes as short as possible
+// while remaining unique.
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[digest.Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
+
+type digestEntry struct {
+ alg digest.Algorithm
+ val string
+ digest digest.Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
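
A self-contained sketch of the set in action; the digests are derived from strings so the example stands alone:

```go
package example

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func demoSet() {
	ds := digestset.NewSet()
	a := digest.FromString("layer-a")
	b := digest.FromString("layer-b")
	_ = ds.Add(a)
	_ = ds.Add(b)

	// Lookup accepts a full digest or an unambiguous hex prefix.
	if d, err := ds.Lookup(a.Hex()[:12]); err == nil {
		fmt.Println("resolved:", d)
	}

	// Shortest unique prefixes, at least 8 characters long here.
	for dgst, short := range digestset.ShortCodeTable(ds, 8) {
		fmt.Println(short, "->", dgst)
	}
}
```
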
diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go
new file mode 100644
index 000000000..bdd8cb708
--- /dev/null
+++ b/vendor/github.com/docker/distribution/doc.go
@@ -0,0 +1,7 @@
+// Package distribution will define the interfaces for the components of
+// docker distribution. The goal is to allow users to reliably package, ship
+// and store content related to docker images.
+//
+// This is currently a work in progress. More details are available in the
+// README.md.
+package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
new file mode 100644
index 000000000..020d33258
--- /dev/null
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -0,0 +1,115 @@
+package distribution
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// ErrAccessDenied is returned when an access to a requested resource is
+// denied.
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil because the client indicated it already has the latest version.
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed.
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrTagUnknown is returned if the given tag is not known by the tag service
+type ErrTagUnknown struct {
+ Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+ return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+ Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+ return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+ return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+ Name string
+ Tag string
+}
+
+func (err ErrManifestUnknown) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+ Name string
+ Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+ return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+ var parts []string
+ for _, err := range errs {
+ parts = append(parts, err.Error())
+ }
+
+ return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+ Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+ return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+ return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
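
Because these errors are concrete types, callers typically branch with a type switch. A sketch, assuming a ManifestService (defined in manifests.go below) named `manifests`:

```go
package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	digest "github.com/opencontainers/go-digest"
)

func fetch(ctx context.Context, manifests distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
	m, err := manifests.Get(ctx, dgst)
	switch err := err.(type) {
	case nil:
		return m, nil
	case distribution.ErrManifestUnknownRevision:
		// The structured fields make the failure actionable.
		return nil, fmt.Errorf("no manifest %s in %s", err.Revision, err.Name)
	default:
		return nil, err
	}
}
```
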
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 000000000..2c99f25d3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+ "fmt"
+ "mime"
+
+ "github.com/docker/distribution/context"
+ "github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+ // References returns a list of objects which make up this manifest.
+ // A reference is anything which can be represented by a
+ // distribution.Descriptor. These can consist of layers, resources or other
+ // manifests.
+ //
+ // While no particular order is required, implementations should return
+ // them from highest to lowest priority. For example, one might want to
+ // return the base layer before the top layer.
+ References() []Descriptor
+
+ // Payload provides the serialized format of the manifest, in addition to
+ // the media type.
+ Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+ // Build creates the manifest from this builder.
+ Build(ctx context.Context) (Manifest, error)
+
+ // References returns a list of objects which have been added to this
+ // builder. The dependencies are returned in the order they were added,
+ // which should be from base to head.
+ References() []Descriptor
+
+ // AppendReference includes the given object in the manifest after any
+ // existing dependencies. If the add fails, such as when adding an
+ // unsupported dependency, an error may be returned.
+ //
+ // The destination of the reference is dependent on the manifest type and
+ // the dependency type.
+ AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+ // Exists returns true if the manifest exists.
+ Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+ // Get retrieves the manifest specified by the given digest
+ Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+ // Put creates or updates the given manifest returning the manifest digest
+ Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+ // Delete removes the manifest specified by the given digest. Deleting
+ // a manifest that doesn't exist will return ErrManifestNotFound
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+ // Enumerate calls ingester for each manifest.
+ Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+ Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+ for t := range mappings {
+ if t != "" {
+ mediaTypes = append(mediaTypes, t)
+ }
+ }
+ return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType.
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+ // Need to look up by the actual media type, not the raw contents of
+ // the header. Strip semicolons and anything following them.
+ var mediaType string
+ if ctHeader != "" {
+ var err error
+ mediaType, _, err = mime.ParseMediaType(ctHeader)
+ if err != nil {
+ return nil, Descriptor{}, err
+ }
+ }
+
+ unmarshalFunc, ok := mappings[mediaType]
+ if !ok {
+ unmarshalFunc, ok = mappings[""]
+ if !ok {
+ return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+ }
+ }
+
+ return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
+// should be called from the init function of a specific manifest package.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+ if _, ok := mappings[mediaType]; ok {
+ return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+ }
+ mappings[mediaType] = u
+ return nil
+}
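
On the consuming side, a sketch of dispatching on the Content-Type header; `resp` and `payload` are assumed to come from a registry manifest GET:

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution"
)

func decode(resp *http.Response, payload []byte) (distribution.Manifest, error) {
	// Media-type parameters after ';' are stripped before the lookup.
	m, desc, err := distribution.UnmarshalManifest(
		resp.Header.Get("Content-Type"), payload)
	if err != nil {
		return nil, err
	}
	fmt.Printf("manifest %s with %d references\n", desc.Digest, len(m.References()))
	return m, nil
}
```
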
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named reference, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
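
A short sketch of the familiar-form helpers (ParseNormalizedNamed is defined in normalize.go below):

```go
package example

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func demoFamiliar() {
	ref, _ := reference.ParseNormalizedNamed("redis:latest")

	fmt.Println(ref.Name())                  // docker.io/library/redis
	fmt.Println(reference.FamiliarName(ref)) // redis

	// Patterns are matched against the familiar form as well.
	ok, _ := reference.FamiliarMatch("red*", ref)
	fmt.Println(ok) // true
}
```
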
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 000000000..2d71fc5e9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,170 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/distribution/digestset"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name
+// strings. If no valid domain is found, the default domain is used. The
+// repository name is expected to have been validated already.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized name-only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
+
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+ if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+ dgst, err := ds.Lookup(ref)
+ if err == nil {
+ return digestReference(dgst), nil
+ }
+ } else {
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+ }
+
+ return ParseNormalizedNamed(ref)
+}
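
A sketch of the normalization behavior described above:

```go
package example

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func demoNormalize() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// Bare names pick up the default "latest" tag.
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/ubuntu:latest
}
```
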
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 000000000..2f66cca87
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including domain, and a digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if nameMatch != nil && len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ domain, path := SplitHostname(ref)
+ return repository{
+ domain: domain,
+ path: path,
+ }
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
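
Taken together, the helpers above compose and strip reference parts without
re-parsing strings. A minimal sketch of the intended flow, assuming an
illustrative repository name and the well-known sha256 digest of the empty
payload (neither value comes from this package):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// ParseNamed only accepts fully-canonical names.
	named, err := reference.ParseNamed("registry.example.com/team/app")
	if err != nil {
		panic(err)
	}

	// Each With* call returns a progressively richer reference type.
	tagged, _ := reference.WithTag(named, "v1.0")
	canonical, _ := reference.WithDigest(tagged,
		digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))

	fmt.Println(canonical.String())                      // name:tag@digest
	fmt.Println(reference.TrimNamed(canonical).String()) // name only
}
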
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 000000000..786034932
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alphanumeric atom, typically a
+ // component of names. This only allows lowercase characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores, or multiple
+ // dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscores, or multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // domainComponentRegexp restricts a single component of the registry
+ // domain to alphanumeric characters, optionally separated by internal
+ // hyphens, never beginning or ending with a hyphen.
+ domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // DomainRegexp defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ DomainRegexp = expression(
+ domainComponentRegexp,
+ optional(repeated(literal(`.`), domainComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp itself has no capturing groups; anchoredNameRegexp below captures
+ // the domain and name parts, omitting the separating forward slash.
+ NameRegexp = expression(
+ optional(DomainRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(DomainRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+
+ // IdentifierRegexp is the format for string identifier used as a
+ // content addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+ IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+ // ShortIdentifierRegexp is the format used to represent a prefix
+ // of an identifier. A prefix may be used to match a sha256 identifier
+ // within a list of trusted identifiers.
+ ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+ // anchoredShortIdentifierRegexp is used to check if a value
+ // is a possible identifier prefix, anchored at start and end
+ // of string.
+ anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
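
The grammar above can be exercised directly; ReferenceRegexp is anchored and
its submatches are name, tag and digest, in that order. A small sketch (the
input string is an assumption for illustration):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// FindStringSubmatch yields [full, name, tag, digest]; absent parts
	// come back as empty strings.
	m := reference.ReferenceRegexp.FindStringSubmatch("registry.example.com:5000/team/app:v1.0")
	if m == nil {
		fmt.Println("not a valid reference")
		return
	}
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
}
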
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
new file mode 100644
index 000000000..1da1d533f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -0,0 +1,97 @@
+package distribution
+
+import (
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+)
+
+// Scope defines the set of items that match a namespace.
+type Scope interface {
+ // Contains returns true if the name belongs to the namespace.
+ Contains(name string) bool
+}
+
+type fullScope struct{}
+
+func (f fullScope) Contains(string) bool {
+ return true
+}
+
+// GlobalScope represents the full namespace scope which contains
+// all other scopes.
+var GlobalScope = Scope(fullScope{})
+
+// Namespace represents a collection of repositories, addressable by name.
+// Generally, a namespace is backed by a set of one or more services,
+// providing facilities such as registry access, trust, and indexing.
+type Namespace interface {
+ // Scope describes the names that can be used with this Namespace. The
+ // global namespace will have a scope that matches all names. The scope
+ // effectively provides an identity for the namespace.
+ Scope() Scope
+
+ // Repository should return a reference to the named repository. The
+ // registry may or may not have the repository but should always return a
+ // reference.
+ Repository(ctx context.Context, name reference.Named) (Repository, error)
+
+ // Repositories fills 'repos' with a lexicographically sorted catalog of repositories
+ // up to the size of 'repos' and returns the value 'n' for the number of entries
+ // which were filled. 'last' contains an offset in the catalog, and 'err' will be
+ // set to io.EOF if there are no more entries to obtain.
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+ // Blobs returns a blob enumerator to access all blobs
+ Blobs() BlobEnumerator
+
+ // BlobStatter returns a BlobStatter to check the status of blobs.
+ BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+ Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption interface {
+ Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+ return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+ // no implementation
+ return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+ // Named returns the name of the repository.
+ Named() reference.Named
+
+ // Manifests returns a reference to this repository's manifest service,
+ // with the supplied options applied.
+ Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+ // Blobs returns a reference to this repository's blob service.
+ Blobs(ctx context.Context) BlobStore
+
+ // TODO(stevvooe): The above BlobStore return can probably be relaxed to
+ // be a BlobService for use with clients. This will allow such
+ // implementations to avoid implementing ServeBlob.
+
+ // Tags returns a reference to this repository's tag service.
+ Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
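
The Repositories contract above implies a paging loop: feed the last entry of
the previous page back in and stop on io.EOF. A sketch of a consumer under
that contract (the listAll helper and the page size are illustrative
assumptions, not part of this package):

// Package catalog sketches a consumer of Namespace.Repositories.
package catalog

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// listAll drains the catalog page by page.
func listAll(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var (
		all  []string
		last string
	)
	page := make([]string, 50) // arbitrary page size

	for {
		n, err := ns.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n > 0 {
			last = page[n-1]
		}
	}
}
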
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 000000000..6d9bb4b62
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+ ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+ return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+ // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+ d, ok := errorCodeToDescriptors[ec]
+
+ if !ok {
+ return ErrorCodeUnknown.Descriptor()
+ }
+
+ return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+ return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+ return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+ return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+ desc, ok := idToDescriptors[string(text)]
+
+ if !ok {
+ desc = ErrorCodeUnknown.Descriptor()
+ }
+
+ *ec = desc.Code
+
+ return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+ return Error{
+ Code: ec,
+ Message: message,
+ }
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately.
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message"`
+ Detail interface{} `json:"detail,omitempty"`
+
+ // TODO(duglin): See if we need an "args" property so we can do the
+ // variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+ return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: e.Message,
+ Detail: detail,
+ }
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: fmt.Sprintf(e.Code.Message(), args...),
+ Detail: e.Detail,
+ }
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+ // Code is the error code that this descriptor describes.
+ Code ErrorCode
+
+ // Value provides a unique, string key, often capitalized with
+ // underscores, to identify the error code. This value is used as the
+ // keyed value when serializing API errors.
+ Value string
+
+ // Message is a short, human-readable description of the error condition
+ // included in API responses.
+ Message string
+
+ // Description provides a complete account of the error's purpose,
+ // suitable for use in documentation.
+ Description string
+
+ // HTTPStatusCode provides the http status code that is associated with
+ // this error condition.
+ HTTPStatusCode int
+}
+
+// ParseErrorCode returns the ErrorCode for the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+ ed, ok := idToDescriptors[value]
+ if ok {
+ return ed.Code
+ }
+
+ return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+ switch len(errs) {
+ case 0:
+ return "<nil>"
+ case 1:
+ return errs[0].Error()
+ default:
+ msg := "errors:\n"
+ for _, err := range errs {
+ msg += err.Error() + "\n"
+ }
+ return msg
+ }
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+ return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes the result
+func (errs Errors) MarshalJSON() ([]byte, error) {
+ var tmpErrs struct {
+ Errors []Error `json:"errors,omitempty"`
+ }
+
+ for _, daErr := range errs {
+ var err Error
+
+ switch daErr := daErr.(type) {
+ case ErrorCode:
+ err = daErr.WithDetail(nil)
+ case Error:
+ err = daErr
+ default:
+ err = ErrorCodeUnknown.WithDetail(daErr)
+ }
+
+ // If the Error struct was set up and they forgot to set the
+ // Message field (meaning it's "") then grab it from the ErrCode
+ msg := err.Message
+ if msg == "" {
+ msg = err.Code.Message()
+ }
+
+ tmpErrs.Errors = append(tmpErrs.Errors, Error{
+ Code: err.Code,
+ Message: msg,
+ Detail: err.Detail,
+ })
+ }
+
+ return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+ var tmpErrs struct {
+ Errors []Error
+ }
+
+ if err := json.Unmarshal(data, &tmpErrs); err != nil {
+ return err
+ }
+
+ var newErrs Errors
+ for _, daErr := range tmpErrs.Errors {
+ // If Message is empty or exactly matches the Code's message string
+ // then just use the Code, no need for a full Error struct
+ if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+ // Errors without details get converted to ErrorCode
+ newErrs = append(newErrs, daErr.Code)
+ } else {
+ // Errors with details are untouched
+ newErrs = append(newErrs, Error{
+ Code: daErr.Code,
+ Message: daErr.Message,
+ Detail: daErr.Detail,
+ })
+ }
+ }
+
+ *errs = newErrs
+ return nil
+}
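
The builders and custom (un)marshalers above round-trip as follows; a minimal
sketch using only codes registered elsewhere in this package (the detail map
is an illustrative assumption):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	errs := errcode.Errors{
		errcode.ErrorCodeUnauthorized, // bare code
		errcode.ErrorCodeUnknown.WithDetail(map[string]string{"hint": "example"}),
	}

	// MarshalJSON expands bare ErrorCodes into full Error structs.
	b, _ := json.Marshal(errs)
	fmt.Println(string(b))

	// UnmarshalJSON collapses detail-less entries back into ErrorCodes.
	var decoded errcode.Errors
	_ = json.Unmarshal(b, &decoded)
	fmt.Println(decoded.Len())
}
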
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 000000000..49a64a86e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,44 @@
+package errcode
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json; charset=utf-8'. It
+// will handle ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ var sc int
+
+ switch errs := err.(type) {
+ case Errors:
+ if len(errs) < 1 {
+ break
+ }
+
+ if err, ok := errs[0].(ErrorCoder); ok {
+ sc = err.ErrorCode().Descriptor().HTTPStatusCode
+ }
+ case ErrorCoder:
+ sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+ err = Errors{err} // create an envelope.
+ default:
+ // We have an unhandled error type, so place it in an envelope
+ // and move along.
+ err = Errors{err}
+ }
+
+ if sc == 0 {
+ sc = http.StatusInternalServerError
+ }
+
+ w.WriteHeader(sc)
+
+ return json.NewEncoder(w).Encode(err)
+}
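
In a handler, ServeJSON is typically the last step on the error path; the
status code comes from the first error's descriptor. A sketch (the route and
the failure condition are assumptions for illustration):

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	http.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("Authorization") == "" {
			// Writes 401 and the {"errors": [...]} envelope.
			_ = errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	_ = http.ListenAndServe(":8080", nil)
}
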
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
new file mode 100644
index 000000000..d1e8826c6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
@@ -0,0 +1,138 @@
+package errcode
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "sync"
+)
+
+var (
+ errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+ idToDescriptors = map[string]ErrorDescriptor{}
+ groupToDescriptors = map[string][]ErrorDescriptor{}
+)
+
+var (
+ // ErrorCodeUnknown is a generic error that can be used as a last
+ // resort if there is no situation-specific error message that can be used
+ ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+ Value: "UNKNOWN",
+ Message: "unknown error",
+ Description: `Generic error returned when the error does not have an
+ API classification.`,
+ HTTPStatusCode: http.StatusInternalServerError,
+ })
+
+ // ErrorCodeUnsupported is returned when an operation is not supported.
+ ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
+ Value: "UNSUPPORTED",
+ Message: "The operation is unsupported.",
+ Description: `The operation was unsupported due to a missing
+ implementation or invalid set of parameters.`,
+ HTTPStatusCode: http.StatusMethodNotAllowed,
+ })
+
+ // ErrorCodeUnauthorized is returned if a request requires
+ // authentication.
+ ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
+ Value: "UNAUTHORIZED",
+ Message: "authentication required",
+ Description: `The access controller was unable to authenticate
+ the client. Often this will be accompanied by a
+ WWW-Authenticate HTTP response header indicating how to
+ authenticate.`,
+ HTTPStatusCode: http.StatusUnauthorized,
+ })
+
+ // ErrorCodeDenied is returned if a client does not have sufficient
+ // permission to perform an action.
+ ErrorCodeDenied = Register("errcode", ErrorDescriptor{
+ Value: "DENIED",
+ Message: "requested access to the resource is denied",
+ Description: `The access controller denied access for the
+ operation on a resource.`,
+ HTTPStatusCode: http.StatusForbidden,
+ })
+
+ // ErrorCodeUnavailable provides a common error to report unavailability
+ // of a service or endpoint.
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
+ Value: "UNAVAILABLE",
+ Message: "service unavailable",
+ Description: "Returned when a service is not available",
+ HTTPStatusCode: http.StatusServiceUnavailable,
+ })
+
+ // ErrorCodeTooManyRequests is returned if a client attempts too many
+ // times to contact a service endpoint.
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
+ Value: "TOOMANYREQUESTS",
+ Message: "too many requests",
+ Description: `Returned when a client attempts to contact a
+ service too many times`,
+ HTTPStatusCode: http.StatusTooManyRequests,
+ })
+)
+
+var nextCode = 1000
+var registerLock sync.Mutex
+
+// Register will make the passed-in error known to the environment and
+// return a new ErrorCode
+func Register(group string, descriptor ErrorDescriptor) ErrorCode {
+ registerLock.Lock()
+ defer registerLock.Unlock()
+
+ descriptor.Code = ErrorCode(nextCode)
+
+ if _, ok := idToDescriptors[descriptor.Value]; ok {
+ panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
+ }
+ if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
+ panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
+ }
+
+ groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
+ errorCodeToDescriptors[descriptor.Code] = descriptor
+ idToDescriptors[descriptor.Value] = descriptor
+
+ nextCode++
+ return descriptor.Code
+}
+
+type byValue []ErrorDescriptor
+
+func (a byValue) Len() int { return len(a) }
+func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
+
+// GetGroupNames returns the list of Error group names that are registered
+func GetGroupNames() []string {
+ keys := []string{}
+
+ for k := range groupToDescriptors {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// GetErrorCodeGroup returns the named group of error descriptors
+func GetErrorCodeGroup(name string) []ErrorDescriptor {
+ desc := groupToDescriptors[name]
+ sort.Sort(byValue(desc))
+ return desc
+}
+
+// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
+// registered, irrespective of what group they're in
+func GetErrorAllDescriptors() []ErrorDescriptor {
+ result := []ErrorDescriptor{}
+
+ for _, group := range GetGroupNames() {
+ result = append(result, GetErrorCodeGroup(group)...)
+ }
+ sort.Sort(byValue(result))
+ return result
+}
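
Extensions register their own codes through the same mechanism; Register
panics on a duplicate Value or Code. A sketch with an invented group name and
descriptor (none of these values exist in the package):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// ErrorCodePaused is hypothetical, registered here only for illustration.
var ErrorCodePaused = errcode.Register("example", errcode.ErrorDescriptor{
	Value:          "PAUSED",
	Message:        "the repository is paused",
	Description:    "Returned when writes to a repository are suspended.",
	HTTPStatusCode: http.StatusServiceUnavailable,
})

func main() {
	fmt.Println(ErrorCodePaused.Error()) // "paused"
	fmt.Println(errcode.GetErrorCodeGroup("example")[0].Value)
}
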
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
new file mode 100644
index 000000000..a9616c58a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -0,0 +1,1596 @@
+package v2
+
+import (
+ "net/http"
+ "regexp"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ nameParameterDescriptor = ParameterDescriptor{
+ Name: "name",
+ Type: "string",
+ Format: reference.NameRegexp.String(),
+ Required: true,
+ Description: `Name of the target repository.`,
+ }
+
+ referenceParameterDescriptor = ParameterDescriptor{
+ Name: "reference",
+ Type: "string",
+ Format: reference.TagRegexp.String(),
+ Required: true,
+ Description: `Tag or digest of the target manifest.`,
+ }
+
+ uuidParameterDescriptor = ParameterDescriptor{
+ Name: "uuid",
+ Type: "opaque",
+ Required: true,
+ Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.",
+ }
+
+ digestPathParameter = ParameterDescriptor{
+ Name: "digest",
+ Type: "path",
+ Required: true,
+ Format: digest.DigestRegexp.String(),
+ Description: `Digest of desired blob.`,
+ }
+
+ hostHeader = ParameterDescriptor{
+ Name: "Host",
+ Type: "string",
+ Description: "Standard HTTP Host Header. Should be set to the registry host.",
+ Format: "<registry host>",
+ Examples: []string{"registry-1.docker.io"},
+ }
+
+ authHeader = ParameterDescriptor{
+ Name: "Authorization",
+ Type: "string",
+ Description: "An RFC7235 compliant authorization header.",
+ Format: "<scheme> <token>",
+ Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+ }
+
+ authChallengeHeader = ParameterDescriptor{
+ Name: "WWW-Authenticate",
+ Type: "string",
+ Description: "An RFC7235 compliant authentication challenge header.",
+ Format: `<scheme> realm="<realm>", ...`,
+ Examples: []string{
+ `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+ },
+ }
+
+ contentLengthZeroHeader = ParameterDescriptor{
+ Name: "Content-Length",
+ Description: "The `Content-Length` header must be zero and the body must be empty.",
+ Type: "integer",
+ Format: "0",
+ }
+
+ dockerUploadUUIDHeader = ParameterDescriptor{
+ Name: "Docker-Upload-UUID",
+ Description: "Identifies the docker upload uuid for the current request.",
+ Type: "uuid",
+ Format: "<uuid>",
+ }
+
+ digestHeader = ParameterDescriptor{
+ Name: "Docker-Content-Digest",
+ Description: "Digest of the targeted content for the request.",
+ Type: "digest",
+ Format: "<digest>",
+ }
+
+ linkHeader = ParameterDescriptor{
+ Name: "Link",
+ Type: "link",
+ Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+ Format: `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
+ }
+
+ paginationParameters = []ParameterDescriptor{
+ {
+ Name: "n",
+ Type: "integer",
+ Description: "Limit the number of entries in each response. It not present, all entries will be returned.",
+ Format: "<integer>",
+ Required: false,
+ },
+ {
+ Name: "last",
+ Type: "string",
+ Description: "Result set will include values lexically after last.",
+ Format: "<integer>",
+ Required: false,
+ },
+ }
+
+ unauthorizedResponseDescriptor = ResponseDescriptor{
+ Name: "Authentication Required",
+ StatusCode: http.StatusUnauthorized,
+ Description: "The client is not authenticated.",
+ Headers: []ParameterDescriptor{
+ authChallengeHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnauthorized,
+ },
+ }
+
+ repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+ Name: "No Such Repository Error",
+ StatusCode: http.StatusNotFound,
+ Description: "The repository is not known to the registry.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ },
+ }
+
+ deniedResponseDescriptor = ResponseDescriptor{
+ Name: "Access Denied",
+ StatusCode: http.StatusForbidden,
+ Description: "The client does not have required access to the repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeDenied,
+ },
+ }
+
+ tooManyRequestsDescriptor = ResponseDescriptor{
+ Name: "Too Many Requests",
+ StatusCode: http.StatusTooManyRequests,
+ Description: "The client made too many requests within a time interval.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeTooManyRequests,
+ },
+ }
+)
+
+const (
+ manifestBody = `{
+ "name": <name>,
+ "tag": <tag>,
+ "fsLayers": [
+ {
+ "blobSum": "<digest>"
+ },
+ ...
+ ],
+ "history": <v1 images>,
+ "signature": <JWS>
+}`
+
+ errorsBody = `{
+ "errors:" [
+ {
+ "code": <error code>,
+ "message": "<error message>",
+ "detail": ...
+ },
+ ...
+ ]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+ // RouteDescriptors provides a list of the routes available in the API.
+ RouteDescriptors []RouteDescriptor
+}{
+ RouteDescriptors: routeDescriptors,
+}
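
APIDescriptor is aimed at tooling such as documentation generators; a minimal
sketch that walks the route table defined below (the output format is an
assumption):

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// Print every route name, its HTTP methods and its path template.
	for _, route := range v2.APIDescriptor.RouteDescriptors {
		for _, m := range route.Methods {
			fmt.Printf("%-24s %-7s %s\n", route.Name, m.Method, route.Path)
		}
	}
}
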
+
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+ // Name is the name of the route, as specified in RouteNameXXX exports.
+ // These names should be considered a unique reference for a route. If
+ // the route is registered with gorilla, this is the name that will be
+ // used.
+ Name string
+
+ // Path is a gorilla/mux-compatible regexp that can be used to match the
+ // route. For any incoming method and path, only one route descriptor
+ // should match.
+ Path string
+
+ // Entity should be a short, human-readable description of the object
+ // targeted by the endpoint.
+ Entity string
+
+ // Description should provide an accurate overview of the functionality
+ // provided by the route.
+ Description string
+
+ // Methods should describe the various HTTP methods that may be used on
+ // this route, including request and response formats.
+ Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+ // Method is an HTTP method, such as GET, PUT or POST.
+ Method string
+
+ // Description should provide an overview of the functionality provided by
+ // the covered method, suitable for use in documentation. Use of markdown
+ // here is encouraged.
+ Description string
+
+ // Requests is a slice of request descriptors enumerating how this
+ // endpoint may be used.
+ Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+ // Name provides a short identifier for the request, usable as a title or
+ // to provide quick context for the particular request.
+ Name string
+
+ // Description should cover the request's purpose, covering any details for
+ // this particular use case.
+ Description string
+
+ // Headers describes headers that must be used with the HTTP request.
+ Headers []ParameterDescriptor
+
+ // PathParameters enumerate the parameterized path components for the
+ // given request, as defined in the route's regular expression.
+ PathParameters []ParameterDescriptor
+
+ // QueryParameters provides a list of query parameters for the given
+ // request.
+ QueryParameters []ParameterDescriptor
+
+ // Body describes the format of the request body.
+ Body BodyDescriptor
+
+ // Successes enumerates the possible responses that are considered to be
+ // the result of a successful request.
+ Successes []ResponseDescriptor
+
+ // Failures covers the possible failures from this particular request.
+ Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+ // Name provides a short identifier for the response, usable as a title or
+ // to provide quick context for the particular response.
+ Name string
+
+ // Description should provide a brief overview of the role of the
+ // response.
+ Description string
+
+ // StatusCode specifies the status received by this particular response.
+ StatusCode int
+
+ // Headers covers any headers that may be returned from the response.
+ Headers []ParameterDescriptor
+
+ // Fields describes any fields that may be present in the response.
+ Fields []ParameterDescriptor
+
+ // ErrorCodes enumerates the error codes that may be returned along with
+ // the response.
+ ErrorCodes []errcode.ErrorCode
+
+ // Body describes the body of the response, if any.
+ Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+ ContentType string
+ Format string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+ // Name is the name of the parameter, either of the path component or
+ // query parameter.
+ Name string
+
+ // Type specifies the type of the parameter, such as string, integer, etc.
+ Type string
+
+ // Description provides a human-readable description of the parameter.
+ Description string
+
+ // Required indicates whether the parameter must be present in the request.
+ Required bool
+
+ // Format specifies the string format accepted by this parameter.
+ Format string
+
+ // Regexp is a compiled regular expression that can be used to validate
+ // the contents of the parameter.
+ Regexp *regexp.Regexp
+
+ // Examples provides multiple examples for the values that might be valid
+ // for this parameter.
+ Examples []string
+}
+
+var routeDescriptors = []RouteDescriptor{
+ {
+ Name: RouteNameBase,
+ Path: "/v2/",
+ Entity: "Base",
+ Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`,
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Check that the endpoint implements Docker Registry API V2.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The API implements V2 protocol and is accessible.",
+ StatusCode: http.StatusOK,
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "The registry does not implement the V2 API.",
+ StatusCode: http.StatusNotFound,
+ },
+ unauthorizedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameTags,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list",
+ Entity: "Tags",
+ Description: "Retrieve information about tags.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Fetch the tags under the repository identified by `name`.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Tags",
+ Description: "Return all tags for the repository",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Description: "A list of tags for the named repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "name": <name>,
+ "tags": [
+ <tag>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Tags Paginated",
+ Description: "Return a portion of the tags for the specified repository.",
+ PathParameters: []ParameterDescriptor{nameParameterDescriptor},
+ QueryParameters: paginationParameters,
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Description: "A list of tags for the named repository.",
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ linkHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "name": <name>,
+ "tags": [
+ <tag>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameManifest,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
+ Entity: "Manifest",
+ Description: "Create, update, delete and retrieve manifests.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ digestHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "<media type of manifest>",
+ Format: manifestBody,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "The name or reference was invalid.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PUT",
+ Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Body: BodyDescriptor{
+ ContentType: "<media type of manifest>",
+ Format: manifestBody,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Description: "The canonical location url of the uploaded manifest.",
+ Format: "<url>",
+ },
+ contentLengthZeroHeader,
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Manifest",
+ Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
+ StatusCode: http.StatusBadRequest,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ ErrorCodeManifestInvalid,
+ ErrorCodeManifestUnverified,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ {
+ Name: "Missing Layer(s)",
+ Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "errors:" [{
+ "code": "BLOB_UNKNOWN",
+ "message": "blob unknown to registry",
+ "detail": {
+ "digest": "<digest>"
+ }
+ },
+ ...
+ ]
+}`,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ referenceParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusAccepted,
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Reference",
+ Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeTagInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ {
+ Name: "Unknown Manifest",
+ Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeManifestUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ Name: RouteNameBlob,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+ Entity: "Blob",
+ Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Fetch Blob",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "The length of the requested blob content.",
+ Format: "<length>",
+ },
+ digestHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<blob binary data>",
+ },
+ },
+ {
+ Description: "The blob identified by `digest` is available at the provided location.",
+ StatusCode: http.StatusTemporaryRedirect,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Description: "The location where the layer should be accessible.",
+ Format: "<blob location>",
+ },
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeDigestInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+ StatusCode: http.StatusNotFound,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Fetch Blob Part",
+ Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Range",
+ Type: "string",
+ Description: "HTTP Range header specifying blob chunk.",
+ Format: "bytes=<start>-<end>",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.",
+ StatusCode: http.StatusPartialContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "The length of the requested blob chunk.",
+ Format: "<length>",
+ },
+ {
+ Name: "Content-Range",
+ Type: "byte range",
+ Description: "Content range of blob chunk.",
+ Format: "bytes <start>-<end>/<size>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<blob binary data>",
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeDigestInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
+ StatusCode: http.StatusRequestedRangeNotSatisfiable,
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Delete the blob identified by `name` and `digest`",
+ Requests: []RequestDescriptor{
+ {
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ digestPathParameter,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusAccepted,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "0",
+ Format: "0",
+ },
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+ StatusCode: http.StatusNotFound,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameUnknown,
+ ErrorCodeBlobUnknown,
+ },
+ },
+ {
+ Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled",
+ StatusCode: http.StatusMethodNotAllowed,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+
+ // TODO(stevvooe): We may want to add a PUT request here to
+ // kickoff an upload of a blob, integrated with the blob upload
+ // API.
+ },
+ },
+
+ {
+ Name: RouteNameBlobUpload,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/",
+ Entity: "Initiate Blob Upload",
+ Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "POST",
+ Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Initiate Monolithic Blob Upload",
+ Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of blob>",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "digest",
+ Type: "query",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octect-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob has been created in the registry and is available at the provided location.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Initiate Resumable Blob Upload",
+ Description: "Initiate a resumable blob upload with an empty request body.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+ StatusCode: http.StatusAccepted,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Format: "0-0",
+ Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.",
+ },
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Mount Blob",
+ Description: "Mount a blob identified by the `mount` parameter from another repository.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "mount",
+ Type: "query",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Description: `Digest of blob to mount from the source repository.`,
+ },
+ {
+ Name: "from",
+ Type: "query",
+ Format: "<repository name>",
+ Regexp: reference.NameRegexp,
+ Description: `Name of the source repository.`,
+ },
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Description: "The blob has been mounted in the repository and is available at the provided location.",
+ StatusCode: http.StatusCreated,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Name: "Invalid Name or Digest",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ },
+ },
+ {
+ Name: "Not allowed",
+ Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+ StatusCode: http.StatusMethodNotAllowed,
+ ErrorCodes: []errcode.ErrorCode{
+ errcode.ErrorCodeUnsupported,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ Name: RouteNameBlobUploadChunk,
+ Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
+ Entity: "Blob Upload",
+ Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Progress",
+ Description: "The upload is known and in progress. The last received offset is available in the `Range` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PATCH",
+ Description: "Upload a chunk of data for the specified upload.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Stream upload",
+ Description: "Upload a stream of data to upload without completing the upload.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Data Accepted",
+ Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ {
+ Name: "Chunked upload",
+ Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+ Required: true,
+ Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of chunk>",
+ Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary chunk>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Chunk Accepted",
+ Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "/v2/<name>/blobs/uploads/<uuid>",
+ Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+ },
+ {
+ Name: "Range",
+ Type: "header",
+ Format: "0-<offset>",
+ Description: "Range indicating the current progress of the upload.",
+ },
+ contentLengthZeroHeader,
+ dockerUploadUUIDHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+ StatusCode: http.StatusRequestedRangeNotSatisfiable,
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "PUT",
+ Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Format: "<length of data>",
+ Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+ },
+ },
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ QueryParameters: []ParameterDescriptor{
+ {
+ Name: "digest",
+ Type: "string",
+ Format: "<digest>",
+ Regexp: digest.DigestRegexp,
+ Required: true,
+ Description: `Digest of uploaded blob.`,
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/octet-stream",
+ Format: "<binary data>",
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Complete",
+ Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Location",
+ Type: "url",
+ Format: "<blob location>",
+ Description: "The canonical location of the blob for retrieval",
+ },
+ {
+ Name: "Content-Range",
+ Type: "header",
+ Format: "<start of range>-<end of range, inclusive>",
+ Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+ },
+ contentLengthZeroHeader,
+ digestHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "There was an error processing the upload and it must be restarted.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeDigestInvalid,
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ errcode.ErrorCodeUnsupported,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The upload must be restarted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ {
+ Method: "DELETE",
+ Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.",
+ Requests: []RequestDescriptor{
+ {
+ Description: "Cancel the upload specified by `uuid`.",
+ PathParameters: []ParameterDescriptor{
+ nameParameterDescriptor,
+ uuidParameterDescriptor,
+ },
+ Headers: []ParameterDescriptor{
+ hostHeader,
+ authHeader,
+ contentLengthZeroHeader,
+ },
+ Successes: []ResponseDescriptor{
+ {
+ Name: "Upload Deleted",
+ Description: "The upload has been successfully deleted.",
+ StatusCode: http.StatusNoContent,
+ Headers: []ParameterDescriptor{
+ contentLengthZeroHeader,
+ },
+ },
+ },
+ Failures: []ResponseDescriptor{
+ {
+ Description: "An error was encountered processing the delete. The client may ignore this error.",
+ StatusCode: http.StatusBadRequest,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeNameInvalid,
+ ErrorCodeBlobUploadInvalid,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ {
+ Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
+ StatusCode: http.StatusNotFound,
+ ErrorCodes: []errcode.ErrorCode{
+ ErrorCodeBlobUploadUnknown,
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: errorsBody,
+ },
+ },
+ unauthorizedResponseDescriptor,
+ repositoryNotFoundResponseDescriptor,
+ deniedResponseDescriptor,
+ tooManyRequestsDescriptor,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ Name: RouteNameCatalog,
+ Path: "/v2/_catalog",
+ Entity: "Catalog",
+ Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.",
+ Methods: []MethodDescriptor{
+ {
+ Method: "GET",
+ Description: "Retrieve a sorted, json list of repositories available in the registry.",
+ Requests: []RequestDescriptor{
+ {
+ Name: "Catalog Fetch",
+ Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
+ Successes: []ResponseDescriptor{
+ {
+ Description: "Returns the unabridged list of repositories as a json response.",
+ StatusCode: http.StatusOK,
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ },
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+ ]
+}`,
+ },
+ },
+ },
+ },
+ {
+ Name: "Catalog Fetch Paginated",
+ Description: "Return the specified portion of repositories.",
+ QueryParameters: paginationParameters,
+ Successes: []ResponseDescriptor{
+ {
+ StatusCode: http.StatusOK,
+ Body: BodyDescriptor{
+ ContentType: "application/json; charset=utf-8",
+ Format: `{
+ "repositories": [
+ <name>,
+ ...
+ ],
+ "next": "<url>?last=<name>&n=<last value of n>"
+}`,
+ },
+ Headers: []ParameterDescriptor{
+ {
+ Name: "Content-Length",
+ Type: "integer",
+ Description: "Length of the JSON response body.",
+ Format: "<length>",
+ },
+ linkHeader,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+ routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+ for _, descriptor := range routeDescriptors {
+ routeDescriptorsMap[descriptor.Name] = descriptor
+ }
+}
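
// Editorial sketch (not part of the vendored patch): inside this package the
// descriptor table above can be consumed via routeDescriptorsMap, e.g. when
// emitting API documentation. A minimal, hypothetical usage:
//
//     desc := routeDescriptorsMap[RouteNameManifest]
//     fmt.Printf("%s %s\n", desc.Name, desc.Path)
//     for _, m := range desc.Methods {
//         fmt.Printf("  %s: %s\n", m.Method, m.Description)
//     }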
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 000000000..cde011959
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry api.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 000000000..97d6923aa
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+ // ErrorCodeDigestInvalid is returned when uploading a blob if the
+ // provided digest does not match the blob contents.
+ ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "DIGEST_INVALID",
+ Message: "provided digest did not match uploaded content",
+ Description: `When a blob is uploaded, the registry will check that
+ the content matches the digest provided by the client. The error may
+ include a detail structure with the key "digest", including the
+ invalid digest string. This error may also be returned when a manifest
+ includes an invalid layer digest.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+ // size does not match the content length.
+ ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "SIZE_INVALID",
+ Message: "provided length did not match content length",
+ Description: `When a layer is uploaded, the provided size will be
+ checked against the uploaded content. If they do not match, this error
+ will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameInvalid is returned when the name in the manifest does not
+ // match the provided name.
+ ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_INVALID",
+ Message: "invalid repository name",
+ Description: `Invalid repository name encountered either during
+ manifest validation or any API operation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeTagInvalid is returned when the tag in the manifest does not
+ // match the provided tag.
+ ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "TAG_INVALID",
+ Message: "manifest tag did not match URI",
+ Description: `During a manifest upload, if the tag in the manifest
+ does not match the uri tag, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameUnknown is returned when the repository name is not known.
+ ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_UNKNOWN",
+ Message: "repository name not known to registry",
+ Description: `This is returned if the name used during an operation is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+ ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNKNOWN",
+ Message: "manifest unknown",
+ Description: `This error is returned when the manifest, identified by
+ name and tag, is unknown to the repository.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+ // typically during a PUT operation. This error encompasses all errors
+ // encountered during manifest validation that aren't signature errors.
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_INVALID",
+ Message: "manifest invalid",
+ Description: `During upload, manifests undergo several checks ensuring
+ validity. If those checks fail, this error may be returned, unless a
+ more specific error is included. The detail will contain information
+ about the failed validation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestUnverified is returned when the manifest fails
+ // signature verification.
+ ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNVERIFIED",
+ Message: "manifest failed signature verification",
+ Description: `During manifest upload, if the manifest fails signature
+ verification, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+ // unknown to the registry.
+ ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a manifest blob is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeBlobUnknown is returned when a blob is unknown to the
+ // registry. This can happen when the manifest references a nonexistent
+ // layer or the result is not found by a blob fetch.
+ ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a blob is unknown to the
+ registry in a specified repository. This can be returned with a
+ standard get or if a manifest references an unknown layer during
+ upload.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+ ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_UNKNOWN",
+ Message: "blob upload unknown to registry",
+ Description: `If a blob upload has been cancelled or was never
+ started, this error code may be returned.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+ ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_INVALID",
+ Message: "blob upload invalid",
+ Description: `The blob upload encountered an error and can no
+ longer proceed.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+)
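
// Editorial sketch (not part of the vendored patch): a server handler might
// surface one of the codes above roughly as follows, assuming the ServeJSON
// helper from the sibling errcode/handler.go in this vendor tree:
//
//     func getManifest(w http.ResponseWriter, r *http.Request) {
//         // ... manifest lookup failed ...
//         detail := map[string]string{"name": "library/ubuntu", "tag": "latest"}
//         _ = errcode.ServeJSON(w, ErrorCodeManifestUnknown.WithDetail(detail))
//     }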
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 000000000..9bc41a3a6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+var (
+ // according to rfc7230
+ reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+ reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
+ reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+ // Following are states of forwarded header parser. Any state could transition to a failure.
+ const (
+ // terminating state; can transition to Parameter
+ stateElement = iota
+ // terminating state; can transition to KeyValueDelimiter
+ stateParameter
+ // can transition to Value
+ stateKeyValueDelimiter
+ // can transition to one of { QuotedValue, PairEnd }
+ stateValue
+ // can transition to one of { EscapedCharacter, PairEnd }
+ stateQuotedValue
+ // can transition to one of { QuotedValue }
+ stateEscapedCharacter
+ // terminating state; can transition to one of { Parameter, Element }
+ statePairEnd
+ )
+
+ var (
+ parameter string
+ value string
+ parse = forwarded[:]
+ res = map[string]string{}
+ state = stateElement
+ )
+
+Loop:
+ for {
+ // skip spaces unless in quoted value
+ if state != stateQuotedValue && state != stateEscapedCharacter {
+ parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+ }
+
+ if len(parse) == 0 {
+ if state != stateElement && state != statePairEnd && state != stateParameter {
+ return nil, parse, fmt.Errorf("unexpected end of input")
+ }
+ // terminating
+ break
+ }
+
+ switch state {
+ // terminate at list element delimiter
+ case stateElement:
+ if parse[0] == ',' {
+ parse = parse[1:]
+ break Loop
+ }
+ state = stateParameter
+
+ // parse parameter (the key of key-value pair)
+ case stateParameter:
+ match := reToken.FindString(parse)
+ if len(match) == 0 {
+ return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+ }
+ parameter = strings.ToLower(match)
+ parse = parse[len(match):]
+ state = stateKeyValueDelimiter
+
+ // parse '='
+ case stateKeyValueDelimiter:
+ if parse[0] != '=' {
+ return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+ }
+ parse = parse[1:]
+ state = stateValue
+
+ // parse value or quoted value
+ case stateValue:
+ if parse[0] == '"' {
+ parse = parse[1:]
+ state = stateQuotedValue
+ } else {
+ value = reToken.FindString(parse)
+ if len(value) == 0 {
+ return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+ }
+ if _, exists := res[parameter]; exists {
+ return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+ }
+ res[parameter] = value
+ parse = parse[len(value):]
+ value = ""
+ state = statePairEnd
+ }
+
+ // parse a part of quoted value until the first backslash
+ case stateQuotedValue:
+ match := reQuotedValue.FindString(parse)
+ value += match
+ parse = parse[len(match):]
+ switch {
+ case len(parse) == 0:
+ return nil, parse, fmt.Errorf("unterminated quoted string")
+ case parse[0] == '"':
+ res[parameter] = value
+ value = ""
+ parse = parse[1:]
+ state = statePairEnd
+ case parse[0] == '\\':
+ parse = parse[1:]
+ state = stateEscapedCharacter
+ }
+
+ // parse escaped character in a quoted string, ignore the backslash
+ // transition back to QuotedValue state
+ case stateEscapedCharacter:
+ c := reEscapedCharacter.FindString(parse)
+ if len(c) == 0 {
+ return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+ }
+ value += c
+ parse = parse[1:]
+ state = stateQuotedValue
+
+ // expect either a new key-value pair, new list or end of input
+ case statePairEnd:
+ switch parse[0] {
+ case ';':
+ parse = parse[1:]
+ state = stateParameter
+ case ',':
+ state = stateElement
+ default:
+ return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+ }
+ }
+ }
+
+ return res, parse, nil
+}
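
// Editorial sketch (not part of the vendored patch): parsing the second
// example from the doc comment above. Only the first list element is
// consumed; the remainder is handed back unparsed:
//
//     header := `for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`
//     params, rest, err := parseForwardedHeader(header)
//     // err == nil
//     // params == map[string]string{"for": "192.0.2.43:443", "host": "registry.example.org"}
//     // rest still holds the second, unparsed element: `for="10.10.05.40:80"`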
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 000000000..5b80d5be7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+ RouteNameBase = "base"
+ RouteNameManifest = "manifest"
+ RouteNameTags = "tags"
+ RouteNameBlob = "blob"
+ RouteNameBlobUpload = "blob-upload"
+ RouteNameBlobUploadChunk = "blob-upload-chunk"
+ RouteNameCatalog = "catalog"
+)
+
+var allEndpoints = []string{
+ RouteNameManifest,
+ RouteNameCatalog,
+ RouteNameTags,
+ RouteNameBlob,
+ RouteNameBlobUpload,
+ RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+ return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+ rootRouter := mux.NewRouter()
+ router := rootRouter
+ if prefix != "" {
+ router = router.PathPrefix(prefix).Subrouter()
+ }
+
+ router.StrictSlash(true)
+
+ for _, descriptor := range routeDescriptors {
+ router.Path(descriptor.Path).Name(descriptor.Name)
+ }
+
+ return rootRouter
+}
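
// Editorial sketch (not part of the vendored patch): the named routes can be
// matched against an incoming request with gorilla/mux, e.g. for the manifest
// route declared in descriptors.go:
//
//     router := Router()
//     req := httptest.NewRequest("GET", "/v2/library/ubuntu/manifests/latest", nil)
//     var match mux.RouteMatch
//     if router.Match(req, &match) {
//         fmt.Println(match.Route.GetName())                       // "manifest"
//         fmt.Println(match.Vars["name"], match.Vars["reference"]) // "library/ubuntu latest"
//     }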
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 000000000..1337bdb12
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,266 @@
+package v2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/docker/distribution/reference"
+ "github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+ root *url.URL // url root (ie http://localhost/)
+ router *mux.Router
+ relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+ return &URLBuilder{
+ root: root,
+ router: Router(),
+ relative: relative,
+ }
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+ u, err := url.Parse(root)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewURLBuilder(u, relative), nil
+}
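
// Editorial sketch (not part of the vendored patch): a hypothetical client
// building a tags URL from a string root (BuildTagsURL is defined below):
//
//     ub, err := NewURLBuilderFromString("https://registry.example.com/", false)
//     if err != nil {
//         // handle the error
//     }
//     named, _ := reference.WithName("library/ubuntu")
//     tagsURL, _ := ub.BuildTagsURL(named)
//     // tagsURL == "https://registry.example.com/v2/library/ubuntu/tags/list"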
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+ var (
+ scheme = "http"
+ host = r.Host
+ )
+
+ if r.TLS != nil {
+ scheme = "https"
+ } else if len(r.URL.Scheme) > 0 {
+ scheme = r.URL.Scheme
+ }
+
+ // Handle forwarded headers
+ // Prefer "Forwarded" header as defined by rfc7239 if given
+ // see https://tools.ietf.org/html/rfc7239
+ if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+ forwardedHeader, _, err := parseForwardedHeader(forwarded)
+ if err == nil {
+ if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+ scheme = fproto
+ }
+ if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+ host = fhost
+ }
+ }
+ } else {
+ if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+ scheme = forwardedProto
+ }
+ if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+ // According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+ // comma-separated list of hosts, to which each proxy appends the
+ // requested host. We want to grab the first from this comma-separated
+ // list.
+ hosts := strings.SplitN(forwardedHost, ",", 2)
+ host = strings.TrimSpace(hosts[0])
+ }
+ }
+
+ basePath := routeDescriptorsMap[RouteNameBase].Path
+
+ requestPath := r.URL.Path
+ index := strings.Index(requestPath, basePath)
+
+ u := &url.URL{
+ Scheme: scheme,
+ Host: host,
+ }
+
+ if index > 0 {
+ // N.B. index+1 is important because we want to include the trailing /
+ u.Path = requestPath[0 : index+1]
+ }
+
+ return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+ route := ub.cloneRoute(RouteNameBase)
+
+ baseURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories.
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameCatalog)
+
+ catalogURL, err := route.URL()
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameTags)
+
+ tagsURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+ route := ub.cloneRoute(RouteNameManifest)
+
+ tagOrDigest := ""
+ switch v := ref.(type) {
+ case reference.Tagged:
+ tagOrDigest = v.Tag()
+ case reference.Digested:
+ tagOrDigest = v.Digest().String()
+ default:
+ return "", fmt.Errorf("reference must have a tag or digest")
+ }
+
+ manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+ if err != nil {
+ return "", err
+ }
+
+ return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+ route := ub.cloneRoute(RouteNameBlob)
+
+ layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+ if err != nil {
+ return "", err
+ }
+
+ return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUpload)
+
+ uploadURL, err := route.URL("name", name.Name())
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+ route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+ uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+ if err != nil {
+ return "", err
+ }
+
+ return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+ route := new(mux.Route)
+ root := new(url.URL)
+
+ *route = *ub.router.GetRoute(name) // clone the route
+ *root = *ub.root
+
+ return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+ *mux.Route
+ root *url.URL
+ relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+ routeURL, err := cr.Route.URL(pairs...)
+ if err != nil {
+ return nil, err
+ }
+
+ if cr.relative {
+ return routeURL, nil
+ }
+
+ if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+ routeURL.Path = routeURL.Path[1:]
+ }
+
+ url := cr.root.ResolveReference(routeURL)
+ url.Scheme = cr.root.Scheme
+ return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+ merged := u.Query()
+
+ for _, v := range values {
+ for k, vv := range v {
+ merged[k] = append(merged[k], vv...)
+ }
+ }
+
+ u.RawQuery = merged.Encode()
+ return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+ up, err := url.Parse(u)
+
+ if err != nil {
+ panic(err) // should never happen
+ }
+
+ return appendValuesURL(up, values...).String()
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 000000000..2c3ebe165
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
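
// Editorial sketch (not part of the vendored patch): canonicalAddr fills in
// the scheme's default port when none is given:
//
//     u, _ := url.Parse("https://registry.example.com/v2/")
//     fmt.Println(canonicalAddr(u)) // "registry.example.com:443"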
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 000000000..c9bdfc355
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+ // Scheme is the auth-scheme according to RFC 2617
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 2617
+ Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+ // GetChallenges returns the challenges for the given
+ // endpoint URL.
+ GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+ // AddResponse adds the response to the challenge
+ // manager. The challenges will be parsed out of
+ // the WWW-Authenticate headers and added to the
+ // URL which produced the response. If the
+ // response was authorized, any challenges for the
+ // endpoint will be cleared.
+ AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+ return &simpleManager{
+ Challenges: make(map[string][]Challenge),
+ }
+}
+
+type simpleManager struct {
+ sync.RWMutex
+ Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+ endpoint.Host = strings.ToLower(endpoint.Host)
+ endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+ normalizeURL(&endpoint)
+
+ m.RLock()
+ defer m.RUnlock()
+ challenges := m.Challenges[endpoint.String()]
+ return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+ challenges := ResponseChallenges(resp)
+ if resp.Request == nil {
+ return fmt.Errorf("missing request reference")
+ }
+ urlCopy := url.URL{
+ Path: resp.Request.URL.Path,
+ Host: resp.Request.URL.Host,
+ Scheme: resp.Request.URL.Scheme,
+ }
+ normalizeURL(&urlCopy)
+
+ m.Lock()
+ defer m.Unlock()
+ m.Challenges[urlCopy.String()] = challenges
+ return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+ if resp.StatusCode == http.StatusUnauthorized {
+ // Parse the WWW-Authenticate Header and store the challenges
+ // on this endpoint object.
+ return parseAuthHeader(resp.Header)
+ }
+
+ return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+ challenges := []Challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
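
// Editorial sketch (not part of the vendored patch): extracting a bearer
// challenge from a 401 response; the header values are illustrative:
//
//     resp := &http.Response{
//         StatusCode: http.StatusUnauthorized,
//         Header: http.Header{"Www-Authenticate": []string{
//             `Bearer realm="https://auth.example.com/token",service="registry.example.com"`,
//         }},
//     }
//     for _, c := range ResponseChallenges(resp) {
//         fmt.Println(c.Scheme, c.Parameters["realm"], c.Parameters["service"])
//         // bearer https://auth.example.com/token registry.example.com
//     }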
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
new file mode 100644
index 000000000..e3ffcb00f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+)
+
+type httpBlobUpload struct {
+ statter distribution.BlobStatter
+ client *http.Client
+
+ uuid string
+ startedAt time.Time
+
+ location string // always the last value of the location header.
+ offset int64
+ closed bool
+}
+
+func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
+ panic("Not implemented")
+}
+
+func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUploadUnknown
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
+ if err != nil {
+ return 0, err
+ }
+ defer req.Body.Close()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int64
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
+ req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
+ if err != nil {
+ return 0, err
+ }
+ req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
+ req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
+ req.Header.Set("Content-Type", "application/octet-stream")
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+
+ if !SuccessStatus(resp.StatusCode) {
+ return 0, hbu.handleErrorResponse(resp)
+ }
+
+ hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+ hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+ if err != nil {
+ return 0, err
+ }
+ rng := resp.Header.Get("Range")
+ var start, end int
+ if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+ return 0, err
+ } else if n != 2 || end < start {
+ return 0, fmt.Errorf("bad range format: %s", rng)
+ }
+
+ return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Size() int64 {
+ return hbu.offset
+}
+
+func (hbu *httpBlobUpload) ID() string {
+ return hbu.uuid
+}
+
+func (hbu *httpBlobUpload) StartedAt() time.Time {
+ return hbu.startedAt
+}
+
+func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+ // TODO(dmcgowan): Check if already finished, if so just fetch
+ req, err := http.NewRequest("PUT", hbu.location, nil)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ values := req.URL.Query()
+ values.Set("digest", desc.Digest.String())
+ req.URL.RawQuery = values.Encode()
+
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if !SuccessStatus(resp.StatusCode) {
+ return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
+ }
+
+ return hbu.statter.Stat(ctx, desc.Digest)
+}
+
+func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
+ req, err := http.NewRequest("DELETE", hbu.location, nil)
+ if err != nil {
+ return err
+ }
+ resp, err := hbu.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return hbu.handleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) Close() error {
+ hbu.closed = true
+ return nil
+}
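
// Editorial sketch (not part of the vendored patch): httpBlobUpload backs the
// distribution.BlobWriter a client obtains from a repository's blob store
// (Create is declared in distribution/blobs.go; repo, content and dgst are
// hypothetical):
//
//     bw, err := repo.Blobs(ctx).Create(ctx)
//     if err != nil {
//         // handle the error
//     }
//     if _, err := io.Copy(bw, content); err != nil {
//         // handle the error
//     }
//     desc, err := bw.Commit(ctx, distribution.Descriptor{Digest: dgst})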
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 000000000..52d49d5d2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+ Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+ return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &UnexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &UnexpectedHTTPResponseError{
+ ParseErr: ErrNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns error parsed from HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
+func HandleErrorResponse(resp *http.Response) error {
+ if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+ // Check for OAuth errors within the `WWW-Authenticate` header first
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ for _, c := range challenge.ResponseChallenges(resp) {
+ if c.Scheme == "bearer" {
+ var err errcode.Error
+ // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+ switch c.Parameters["error"] {
+ case "invalid_token":
+ err.Code = errcode.ErrorCodeUnauthorized
+ case "insufficient_scope":
+ err.Code = errcode.ErrorCodeDenied
+ default:
+ continue
+ }
+ if description := c.Parameters["error_description"]; description != "" {
+ err.Message = description
+ } else {
+ err.Message = err.Code.Message()
+ }
+
+ return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+ }
+ }
+ err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+ if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+ return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+ }
+ return err
+ }
+ return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func SuccessStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
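
// Editorial sketch (not part of the vendored patch): typical use of the two
// helpers above around a raw registry call (httpClient and manifestURL are
// hypothetical):
//
//     resp, err := httpClient.Get(manifestURL)
//     if err != nil {
//         return err
//     }
//     defer resp.Body.Close()
//     if !SuccessStatus(resp.StatusCode) {
//         // yields errcode.Errors, an errcode.Error, or one of the
//         // Unexpected* error types defined above
//         return HandleErrorResponse(resp)
//     }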
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 000000000..b82a968e2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,853 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client/transport"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/docker/distribution/registry/storage/cache/memory"
+ "github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+
+ if len(via) > 0 {
+ for headerName, headerVals := range via[0].Header {
+ if headerName != "Accept" && headerName != "Range" {
+ continue
+ }
+ for _, val := range headerVals {
+ // Don't add to redirected request if redirected
+ // request already has a header with the same
+ // name and value.
+ hasValue := false
+ for _, existingVal := range req.Header[headerName] {
+ if existingVal == val {
+ hasValue = true
+ break
+ }
+ }
+ if !hasValue {
+ req.Header.Add(headerName, val)
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ Timeout: 1 * time.Minute,
+ CheckRedirect: checkHTTPRedirect,
+ }
+
+ return &registry{
+ client: client,
+ ub: ub,
+ context: ctx,
+ }, nil
+}
+
+type registry struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
+// are no more entries.
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+ var numFilled int
+ var returnErr error
+
+ values := buildCatalogValues(len(entries), last)
+ u, err := r.ub.BuildCatalogURL(values)
+ if err != nil {
+ return 0, err
+ }
+
+ resp, err := r.client.Get(u)
+ if err != nil {
+ return 0, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ var ctlg struct {
+ Repositories []string `json:"repositories"`
+ }
+ decoder := json.NewDecoder(resp.Body)
+
+ if err := decoder.Decode(&ctlg); err != nil {
+ return 0, err
+ }
+
+ for cnt := range ctlg.Repositories {
+ entries[cnt] = ctlg.Repositories[cnt]
+ }
+ numFilled = len(ctlg.Repositories)
+
+ link := resp.Header.Get("Link")
+ if link == "" {
+ returnErr = io.EOF
+ }
+ } else {
+ return 0, HandleErrorResponse(resp)
+ }
+
+ return numFilled, returnErr
+}
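
// Editorial sketch (not part of the vendored patch): paging through the full
// catalog with Repositories until io.EOF signals the last chunk (reg and ctx
// are assumed to exist):
//
//     entries := make([]string, 50)
//     last := ""
//     for {
//         n, err := reg.Repositories(ctx, entries, last)
//         for _, name := range entries[:n] {
//             fmt.Println(name)
//         }
//         if err == io.EOF {
//             break
//         }
//         if err != nil {
//             return err
//         }
//         last = entries[n-1]
//     }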
+
+// NewRepository creates a new Repository for the given repository name and base URL.
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+ ub, err := v2.NewURLBuilderFromString(baseURL, false)
+ if err != nil {
+ return nil, err
+ }
+
+ client := &http.Client{
+ Transport: transport,
+ CheckRedirect: checkHTTPRedirect,
+ // TODO(dmcgowan): create cookie jar
+ }
+
+ return &repository{
+ client: client,
+ ub: ub,
+ name: name,
+ context: ctx,
+ }, nil
+}
+
+type repository struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+func (r *repository) Named() reference.Named {
+ return r.name
+}
+
+func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
+ statter := &blobStatter{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ }
+ return &blobs{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
+ }
+}
+
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+ // todo(richardscothern): options should be sent over the wire
+ return &manifests{
+ name: r.name,
+ ub: r.ub,
+ client: r.client,
+ etags: make(map[string]string),
+ }, nil
+}
+
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+ return &tags{
+ client: r.client,
+ ub: r.ub,
+ context: r.context,
+ name: r.Named(),
+ }
+}
+
+// tags implements remote tagging operations.
+type tags struct {
+ client *http.Client
+ ub *v2.URLBuilder
+ context context.Context
+ name reference.Named
+}
+
+// All returns all tags
+func (t *tags) All(ctx context.Context) ([]string, error) {
+ var tags []string
+
+ u, err := t.ub.BuildTagsURL(t.name)
+ if err != nil {
+ return tags, err
+ }
+
+ for {
+ resp, err := t.client.Get(u)
+ if err != nil {
+ return tags, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return tags, err
+ }
+
+ tagsResponse := struct {
+ Tags []string `json:"tags"`
+ }{}
+ if err := json.Unmarshal(b, &tagsResponse); err != nil {
+ return tags, err
+ }
+ tags = append(tags, tagsResponse.Tags...)
+ if link := resp.Header.Get("Link"); link != "" {
+ u = strings.Trim(strings.Split(link, ";")[0], "<>")
+ } else {
+ return tags, nil
+ }
+ } else {
+ return tags, HandleErrorResponse(resp)
+ }
+ }
+}
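
// Editorial sketch (not part of the vendored patch): listing every tag of a
// repository through the TagService (named and ctx are assumed to exist; a
// nil transport falls back to http.DefaultTransport):
//
//     repo, err := NewRepository(ctx, named, "https://registry.example.com", nil)
//     if err != nil {
//         return err
//     }
//     allTags, err := repo.Tags(ctx).All(ctx)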
+
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+ desc := distribution.Descriptor{}
+ headers := response.Header
+
+ ctHeader := headers.Get("Content-Type")
+ if ctHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+ }
+ desc.MediaType = ctHeader
+
+ digestHeader := headers.Get("Docker-Content-Digest")
+ if digestHeader == "" {
+ bytes, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ return desc, nil
+ }
+
+ dgst, err := digest.Parse(digestHeader)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Digest = dgst
+
+ lengthHeader := headers.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+ }
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ desc.Size = length
+
+ return desc, nil
+
+}
+
+// Get issues a HEAD request for a Manifest against its named endpoint in order
+// to construct a descriptor for the tag. If the registry doesn't support HEADing
+// a manifest, fall back to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+ ref, err := reference.WithTag(t.name, tag)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := t.ub.BuildManifestURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ newRequest := func(method string) (*http.Response, error) {
+ req, err := http.NewRequest(method, u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+ resp, err := t.client.Do(req)
+ return resp, err
+ }
+
+ resp, err := newRequest("HEAD")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ switch {
+ case resp.StatusCode >= 200 && resp.StatusCode < 400:
+ return descriptorFromResponse(resp)
+ default:
+ // if the response is an error - there will be no body to decode.
+ // Issue a GET request:
+ // - for data from a server that does not handle HEAD
+ // - to get error details in case of a failure
+ resp, err = newRequest("GET")
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+ return descriptorFromResponse(resp)
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+ }
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+ panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+ panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+ panic("not implemented")
+}
+
+type manifests struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+ etags map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return false, err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := ms.client.Head(u)
+ if err != nil {
+ return false, err
+ }
+
+ if SuccessStatus(resp.StatusCode) {
+ return true, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return false, nil
+ }
+ return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+ return etagOption{tag, etag}
+}
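+
+// A hedged usage sketch (the repository value and cached etag are assumed):
+//
+//	ms, _ := repo.Manifests(ctx)
+//	m, err := ms.Get(ctx, "", distribution.WithTag("latest"),
+//		AddEtagToTag("latest", cachedEtag))
+//	if err == distribution.ErrManifestNotModified {
+//		// the cached copy is still current
+//	}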
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+ if ms, ok := ms.(*manifests); ok {
+ ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+ return nil
+ }
+ return fmt.Errorf("etag options is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on a
+// successful request from the 'Docker-Content-Digest' header. The returned
+// digest represents the digest which the registry uses to refer to the
+// content and can be used to delete the content.
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
+ return contentDigestOption{dgst}
+}
+
+type contentDigestOption struct{ digest *digest.Digest }
+
+func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
+ return nil
+}
+
+func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+ var (
+ digestOrTag string
+ ref reference.Named
+ err error
+ contentDgst *digest.Digest
+ )
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ digestOrTag = opt.Tag
+ ref, err = reference.WithTag(ms.name, opt.Tag)
+ if err != nil {
+ return nil, err
+ }
+ } else if opt, ok := option.(contentDigestOption); ok {
+ contentDgst = opt.digest
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if digestOrTag == "" {
+ digestOrTag = dgst.String()
+ ref, err = reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, t := range distribution.ManifestMediaTypes() {
+ req.Header.Add("Accept", t)
+ }
+
+ if _, ok := ms.etags[digestOrTag]; ok {
+ req.Header.Set("If-None-Match", ms.etags[digestOrTag])
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return nil, distribution.ErrManifestNotModified
+ } else if SuccessStatus(resp.StatusCode) {
+ if contentDgst != nil {
+ dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil {
+ *contentDgst = dgst
+ }
+ }
+ mt := resp.Header.Get("Content-Type")
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ m, _, err := distribution.UnmarshalManifest(mt, body)
+ if err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+ return nil, HandleErrorResponse(resp)
+}
+
+// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
+// tag name in order to build the correct upload URL.
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+ ref := ms.name
+ var tagged bool
+
+ for _, option := range options {
+ if opt, ok := option.(distribution.WithTagOption); ok {
+ var err error
+ ref, err = reference.WithTag(ref, opt.Tag)
+ if err != nil {
+ return "", err
+ }
+ tagged = true
+ } else {
+ err := option.Apply(ms)
+ if err != nil {
+ return "", err
+ }
+ }
+ }
+ mediaType, p, err := m.Payload()
+ if err != nil {
+ return "", err
+ }
+
+ if !tagged {
+ // generate a canonical digest and Put by digest
+ _, d, err := distribution.UnmarshalManifest(mediaType, p)
+ if err != nil {
+ return "", err
+ }
+ ref, err = reference.WithDigest(ref, d.Digest)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ manifestURL, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return "", err
+ }
+
+ putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
+ if err != nil {
+ return "", err
+ }
+
+ putRequest.Header.Set("Content-Type", mediaType)
+
+ resp, err := ms.client.Do(putRequest)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ dgstHeader := resp.Header.Get("Docker-Content-Digest")
+ dgst, err := digest.Parse(dgstHeader)
+ if err != nil {
+ return "", err
+ }
+
+ return dgst, nil
+ }
+
+ return "", HandleErrorResponse(resp)
+}
+
+func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(ms.name, dgst)
+ if err != nil {
+ return err
+ }
+ u, err := ms.ub.BuildManifestURL(ref)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := ms.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+// todo(richardscothern): Restore interface and implementation with merge of #1050
+/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+ panic("not supported")
+}*/
+
+type blobs struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+
+ statter distribution.BlobDescriptorService
+ distribution.BlobDeleter
+}
+
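+// sanitizeLocation resolves a possibly relative Location header value
+// against the URL of the request that produced it, returning an absolute URL.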
+func sanitizeLocation(location, base string) (string, error) {
+ baseURL, err := url.Parse(base)
+ if err != nil {
+ return "", err
+ }
+
+ locationURL, err := url.Parse(location)
+ if err != nil {
+ return "", err
+ }
+
+ return baseURL.ResolveReference(locationURL).String(), nil
+}
+
+func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ return bs.statter.Stat(ctx, dgst)
+}
+
+func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+ reader, err := bs.Open(ctx, dgst)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+
+ return ioutil.ReadAll(reader)
+}
+
+func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return nil, err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ return transport.NewHTTPReadSeeker(bs.client, blobURL,
+ func(resp *http.Response) error {
+ if resp.StatusCode == http.StatusNotFound {
+ return distribution.ErrBlobUnknown
+ }
+ return HandleErrorResponse(resp)
+ }), nil
+}
+
+func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+ panic("not implemented")
+}
+
+func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+ writer, err := bs.Create(ctx)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ dgstr := digest.Canonical.Digester()
+ n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ if n < int64(len(p)) {
+ return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
+ }
+
+ desc := distribution.Descriptor{
+ MediaType: mediaType,
+ Size: int64(len(p)),
+ Digest: dgstr.Digest(),
+ }
+
+ return writer.Commit(ctx, desc)
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+ return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+ return optionFunc(func(v interface{}) error {
+ opts, ok := v.(*distribution.CreateOptions)
+ if !ok {
+ return fmt.Errorf("unexpected options type: %T", v)
+ }
+
+ opts.Mount.ShouldMount = true
+ opts.Mount.From = ref
+
+ return nil
+ })
+}
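+
+// A hedged usage sketch of cross-repository mounting (the source repository
+// reference and digest are assumed):
+//
+//	canonical, _ := reference.WithDigest(sourceRepo, dgst)
+//	_, err := repo.Blobs(ctx).Create(ctx, WithMountFrom(canonical))
+//	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
+//		// the blob was mounted from the source repository; no upload needed
+//		_ = ebm.Descriptor
+//	}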
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+ var opts distribution.CreateOptions
+
+ for _, option := range options {
+ err := option.Apply(&opts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var values []url.Values
+
+ if opts.Mount.ShouldMount {
+ values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+ }
+
+ u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := bs.client.Post(u, "", nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusCreated:
+ desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+ case http.StatusAccepted:
+ // TODO(dmcgowan): Check for invalid UUID
+ uuid := resp.Header.Get("Docker-Upload-UUID")
+ location, err := sanitizeLocation(resp.Header.Get("Location"), u)
+ if err != nil {
+ return nil, err
+ }
+
+ return &httpBlobUpload{
+ statter: bs.statter,
+ client: bs.client,
+ uuid: uuid,
+ startedAt: time.Now(),
+ location: location,
+ }, nil
+ default:
+ return nil, HandleErrorResponse(resp)
+ }
+}
+
+func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+ panic("not implemented")
+}
+
+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+ return bs.statter.Clear(ctx, dgst)
+}
+
+type blobStatter struct {
+ name reference.Named
+ ub *v2.URLBuilder
+ client *http.Client
+}
+
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ u, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ resp, err := bs.client.Head(u)
+ if err != nil {
+ return distribution.Descriptor{}, err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ lengthHeader := resp.Header.Get("Content-Length")
+ if lengthHeader == "" {
+ return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+ }
+
+ length, err := strconv.ParseInt(lengthHeader, 10, 64)
+ if err != nil {
+ return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
+ }
+
+ return distribution.Descriptor{
+ MediaType: resp.Header.Get("Content-Type"),
+ Size: length,
+ Digest: dgst,
+ }, nil
+ } else if resp.StatusCode == http.StatusNotFound {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+ return distribution.Descriptor{}, HandleErrorResponse(resp)
+}
+
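+// buildCatalogValues encodes the pagination parameters ("n" and "last")
+// for a catalog request.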
+func buildCatalogValues(maxEntries int, last string) url.Values {
+ values := url.Values{}
+
+ if maxEntries > 0 {
+ values.Add("n", strconv.Itoa(maxEntries))
+ }
+
+ if last != "" {
+ values.Add("last", last)
+ }
+
+ return values
+}
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ ref, err := reference.WithDigest(bs.name, dgst)
+ if err != nil {
+ return err
+ }
+ blobURL, err := bs.ub.BuildBlobURL(ref)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("DELETE", blobURL, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := bs.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if SuccessStatus(resp.StatusCode) {
+ return nil
+ }
+ return HandleErrorResponse(resp)
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
new file mode 100644
index 000000000..e5ff09d75
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -0,0 +1,251 @@
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+var (
+ // The total size may be reported as a literal "*" when unknown; the star
+ // must be escaped as \* so the alternation can actually match it.
+ contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+ // ErrWrongCodeForByteRange is returned if the client sends a request
+ // with a Range header but the server returns a 2xx or 3xx code other
+ // than 206 Partial Content.
+ ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+ return &httpReadSeeker{
+ client: client,
+ url: url,
+ errorHandler: errorHandler,
+ }
+}
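+
+// A hedged usage sketch (the blob URL is assumed; a nil error handler falls
+// back to a generic status error):
+//
+//	rsc := NewHTTPReadSeeker(http.DefaultClient, blobURL, nil)
+//	defer rsc.Close()
+//	size, err := rsc.Seek(0, os.SEEK_END) // length learned from the response
+//	_, err = rsc.Seek(0, os.SEEK_SET)     // cheap rewind; see Read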
+
+type httpReadSeeker struct {
+ client *http.Client
+ url string
+
+ // errorHandler creates an error from an unsuccessful HTTP response.
+ // This allows the error to be created with the HTTP response body
+ // without leaking the body through a returned error.
+ errorHandler func(*http.Response) error
+
+ size int64
+
+ // rc is the remote read closer.
+ rc io.ReadCloser
+ // readerOffset tracks the offset as of the last read.
+ readerOffset int64
+ // seekOffset allows Seek to override the offset. Seek changes
+ // seekOffset instead of changing readerOffset directly so that
+ // connection resets can be delayed and possibly avoided if the
+ // seek is undone (i.e. seeking to the end and then back to the
+ // beginning).
+ seekOffset int64
+ err error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ // If we sought to a different position, we need to reset the
+ // connection. This logic is here instead of Seek so that if
+ // a seek is undone before the next read, the connection doesn't
+ // need to be closed and reopened. A common example of this is
+ // seeking to the end to determine the length, and then seeking
+ // back to the original position.
+ if hrs.readerOffset != hrs.seekOffset {
+ hrs.reset()
+ }
+
+ hrs.readerOffset = hrs.seekOffset
+
+ rd, err := hrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ hrs.seekOffset += int64(n)
+ hrs.readerOffset += int64(n)
+
+ return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ lastReaderOffset := hrs.readerOffset
+
+ if whence == os.SEEK_SET && hrs.rc == nil {
+ // If no request has been made yet, and we are seeking to an
+ // absolute position, set the read offset as well to avoid an
+ // unnecessary request.
+ hrs.readerOffset = offset
+ }
+
+ _, err := hrs.reader()
+ if err != nil {
+ hrs.readerOffset = lastReaderOffset
+ return 0, err
+ }
+
+ newOffset := hrs.seekOffset
+
+ switch whence {
+ case os.SEEK_CUR:
+ newOffset += offset
+ case os.SEEK_END:
+ if hrs.size < 0 {
+ return 0, errors.New("content length not known")
+ }
+ newOffset = hrs.size + offset
+ case os.SEEK_SET:
+ newOffset = offset
+ }
+
+ if newOffset < 0 {
+ err = errors.New("cannot seek to negative position")
+ } else {
+ hrs.seekOffset = newOffset
+ }
+
+ return hrs.seekOffset, err
+}
+
+func (hrs *httpReadSeeker) Close() error {
+ if hrs.err != nil {
+ return hrs.err
+ }
+
+ // close and release reader chain
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ }
+
+ hrs.rc = nil
+
+ hrs.err = errors.New("httpLayer: closed")
+
+ return nil
+}
+
+func (hrs *httpReadSeeker) reset() {
+ if hrs.err != nil {
+ return
+ }
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ hrs.rc = nil
+ }
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+ if hrs.err != nil {
+ return nil, hrs.err
+ }
+
+ if hrs.rc != nil {
+ return hrs.rc, nil
+ }
+
+ req, err := http.NewRequest("GET", hrs.url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if hrs.readerOffset > 0 {
+ // If we are at different offset, issue a range request from there.
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
+ // TODO: get context in here
+ // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+ }
+
+ req.Header.Add("Accept-Encoding", "identity")
+ resp, err := hrs.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Normally would use client.SuccessStatus, but that would be a cyclic
+ // import
+ if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+ if hrs.readerOffset > 0 {
+ if resp.StatusCode != http.StatusPartialContent {
+ return nil, ErrWrongCodeForByteRange
+ }
+
+ contentRange := resp.Header.Get("Content-Range")
+ if contentRange == "" {
+ return nil, errors.New("no Content-Range header found in HTTP 206 response")
+ }
+
+ submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
+ if len(submatches) < 4 {
+ return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
+ }
+
+ startByte, err := strconv.ParseUint(submatches[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
+ }
+
+ if startByte != uint64(hrs.readerOffset) {
+ return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
+ }
+
+ endByte, err := strconv.ParseUint(submatches[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
+ }
+
+ if submatches[3] == "*" {
+ hrs.size = -1
+ } else {
+ size, err := strconv.ParseUint(submatches[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
+ }
+
+ if endByte+1 != size {
+ return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
+ }
+
+ hrs.size = int64(size)
+ }
+ } else if resp.StatusCode == http.StatusOK {
+ hrs.size = resp.ContentLength
+ } else {
+ hrs.size = -1
+ }
+ hrs.rc = resp.Body
+ } else {
+ defer resp.Body.Close()
+ if hrs.errorHandler != nil {
+ return nil, hrs.errorHandler(resp)
+ }
+ return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+ }
+
+ return hrs.rc, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 000000000..30e45fab0
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+ ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+ return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+ for k, s := range http.Header(h) {
+ req.Header[k] = append(req.Header[k], s...)
+ }
+
+ return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+ return &transport{
+ Modifiers: modifiers,
+ Base: base,
+ }
+}
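+
+// A hedged usage sketch (the header values are placeholders):
+//
+//	rt := NewTransport(nil, NewHeaderRequestModifier(http.Header{
+//		"User-Agent": []string{"registry-client/1.0"},
+//	}))
+//	httpClient := &http.Client{Transport: rt}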
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+ Modifiers []RequestModifier
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the transport's modifiers to a copy of the request
+// before delegating it to the base RoundTripper.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req2 := cloneRequest(req)
+ for _, modifier := range t.Modifiers {
+ if err := modifier.ModifyRequest(req2); err != nil {
+ return nil, err
+ }
+ }
+
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
new file mode 100644
index 000000000..10a390919
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
@@ -0,0 +1,35 @@
+// Package cache provides facilities to speed up access to the storage
+// backend.
+package cache
+
+import (
+ "fmt"
+
+ "github.com/docker/distribution"
+)
+
+// BlobDescriptorCacheProvider provides repository scoped
+// BlobDescriptorService cache instances and a global descriptor cache.
+type BlobDescriptorCacheProvider interface {
+ distribution.BlobDescriptorService
+
+ RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
+}
+
+// ValidateDescriptor provides a helper function to ensure that caches have
+// common criteria for admitting descriptors.
+func ValidateDescriptor(desc distribution.Descriptor) error {
+ if err := desc.Digest.Validate(); err != nil {
+ return err
+ }
+
+ if desc.Size < 0 {
+ return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
+ }
+
+ if desc.MediaType == "" {
+ return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 000000000..f647616bc
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,101 @@
+package cache
+
+import (
+ "github.com/docker/distribution/context"
+ "github.com/opencontainers/go-digest"
+
+ "github.com/docker/distribution"
+)
+
+// Metrics is used to hold metric counters
+// related to the number of times a cache was
+// hit or missed.
+type Metrics struct {
+ Requests uint64
+ Hits uint64
+ Misses uint64
+}
+
+// MetricsTracker represents a metric tracker
+// which simply counts the number of hits and misses.
+type MetricsTracker interface {
+ Hit()
+ Miss()
+ Metrics() Metrics
+}
+
+type cachedBlobStatter struct {
+ cache distribution.BlobDescriptorService
+ backend distribution.BlobDescriptorService
+ tracker MetricsTracker
+}
+
+// NewCachedBlobStatter creates a new statter which prefers a cache and
+// falls back to a backend.
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ }
+}
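+
+// A hedged usage sketch (the cache and backend statters are assumed, e.g. the
+// in-memory provider from the sibling memory package and a registry-backed
+// statter):
+//
+//	statter := NewCachedBlobStatter(memoryProvider, backendStatter)
+//	desc, err := statter.Stat(ctx, dgst) // a cache hit avoids the backend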
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will be sent to the tracker.
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ tracker: tracker,
+ }
+}
+
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ desc, err := cbds.cache.Stat(ctx, dgst)
+ if err != nil {
+ if err != distribution.ErrBlobUnknown {
+ context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
+ }
+
+ goto fallback
+ }
+
+ if cbds.tracker != nil {
+ cbds.tracker.Hit()
+ }
+ return desc, nil
+fallback:
+ if cbds.tracker != nil {
+ cbds.tracker.Miss()
+ }
+ desc, err = cbds.backend.Stat(ctx, dgst)
+ if err != nil {
+ return desc, err
+ }
+
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+
+ return desc, err
+}
+
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ err := cbds.cache.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+
+ err = cbds.backend.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
new file mode 100644
index 000000000..b2fcaf4e8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -0,0 +1,179 @@
+package memory
+
+import (
+ "sync"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/opencontainers/go-digest"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+ global *mapBlobDescriptorCache
+ repositories map[string]*mapBlobDescriptorCache
+ mu sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
+ return &inMemoryBlobDescriptorCacheProvider{
+ global: newMapBlobDescriptorCache(),
+ repositories: make(map[string]*mapBlobDescriptorCache),
+ }
+}
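+
+// A hedged usage sketch (the repository name is a placeholder):
+//
+//	provider := NewInMemoryBlobDescriptorCacheProvider()
+//	scoped, err := provider.RepositoryScoped("library/ubuntu")
+//	_ = scoped.SetDescriptor(ctx, dgst, desc) // fills both repo and global maps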
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+ if _, err := reference.ParseNormalizedNamed(repo); err != nil {
+ return nil, err
+ }
+
+ imbdcp.mu.RLock()
+ defer imbdcp.mu.RUnlock()
+
+ return &repositoryScopedInMemoryBlobDescriptorCache{
+ repo: repo,
+ parent: imbdcp,
+ repository: imbdcp.repositories[repo],
+ }, nil
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ return imbdcp.global.Stat(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
+ return imbdcp.global.Clear(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ _, err := imbdcp.Stat(ctx, dgst)
+ if err == distribution.ErrBlobUnknown {
+ if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
+ // if the digests differ, set the other canonical mapping
+ if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
+ return err
+ }
+ }
+
+ // unknown, just set it
+ return imbdcp.global.SetDescriptor(ctx, dgst, desc)
+ }
+
+ // we already know it, do nothing
+ return err
+}
+
+// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
+// repository cache. Instances are not thread-safe but the delegated
+// operations are.
+type repositoryScopedInMemoryBlobDescriptorCache struct {
+ repo string
+ parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
+ repository *mapBlobDescriptorCache
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ rsimbdcp.parent.mu.Unlock()
+
+ if repo == nil {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+
+ return repo.Stat(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ rsimbdcp.parent.mu.Unlock()
+
+ if repo == nil {
+ return distribution.ErrBlobUnknown
+ }
+
+ return repo.Clear(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ rsimbdcp.parent.mu.Lock()
+ repo := rsimbdcp.repository
+ if repo == nil {
+ // allocate map since we are setting it now.
+ var ok bool
+ // have to read back value since we may have allocated elsewhere.
+ repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
+ if !ok {
+ repo = newMapBlobDescriptorCache()
+ rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
+ }
+ rsimbdcp.repository = repo
+ }
+ rsimbdcp.parent.mu.Unlock()
+
+ if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
+ return err
+ }
+
+ return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
+}
+
+// mapBlobDescriptorCache provides a simple map-based implementation of the
+// descriptor cache.
+type mapBlobDescriptorCache struct {
+ descriptors map[digest.Digest]distribution.Descriptor
+ mu sync.RWMutex
+}
+
+var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
+
+func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
+ return &mapBlobDescriptorCache{
+ descriptors: make(map[digest.Digest]distribution.Descriptor),
+ }
+}
+
+func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ if err := dgst.Validate(); err != nil {
+ return distribution.Descriptor{}, err
+ }
+
+ mbdc.mu.RLock()
+ defer mbdc.mu.RUnlock()
+
+ desc, ok := mbdc.descriptors[dgst]
+ if !ok {
+ return distribution.Descriptor{}, distribution.ErrBlobUnknown
+ }
+
+ return desc, nil
+}
+
+func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+ mbdc.mu.Lock()
+ defer mbdc.mu.Unlock()
+
+ delete(mbdc.descriptors, dgst)
+ return nil
+}
+
+func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := dgst.Validate(); err != nil {
+ return err
+ }
+
+ if err := cache.ValidateDescriptor(desc); err != nil {
+ return err
+ }
+
+ mbdc.mu.Lock()
+ defer mbdc.mu.Unlock()
+
+ mbdc.descriptors[dgst] = desc
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
new file mode 100644
index 000000000..503056596
--- /dev/null
+++ b/vendor/github.com/docker/distribution/tags.go
@@ -0,0 +1,27 @@
+package distribution
+
+import (
+ "github.com/docker/distribution/context"
+)
+
+// TagService provides access to information about tagged objects.
+type TagService interface {
+ // Get retrieves the descriptor identified by the tag. Some
+ // implementations may differentiate between "trusted" tags and
+ // "untrusted" tags. If a tag is "untrusted", the mapping will be returned
+ // as an ErrTagUntrusted error, with the target descriptor.
+ Get(ctx context.Context, tag string) (Descriptor, error)
+
+ // Tag associates the tag with the provided descriptor, updating the
+ // current association, if needed.
+ Tag(ctx context.Context, tag string, desc Descriptor) error
+
+ // Untag removes the given tag association
+ Untag(ctx context.Context, tag string) error
+
+ // All returns the set of tags managed by this tag service
+ All(ctx context.Context) ([]string, error)
+
+ // Lookup returns the set of tags referencing the given digest.
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error)
+}
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
new file mode 100644
index 000000000..d433ccaf5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/uuid/uuid.go
@@ -0,0 +1,126 @@
+// Package uuid provides simple UUID generation. Only version 4 style UUIDs
+// can be generated.
+//
+// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
+package uuid
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "time"
+)
+
+const (
+ // Bits is the number of bits in a UUID
+ Bits = 128
+
+ // Size is the number of bytes in a UUID
+ Size = Bits / 8
+
+ format = "%08x-%04x-%04x-%04x-%012x"
+)
+
+var (
+ // ErrUUIDInvalid indicates a parsed string is not a valid uuid.
+ ErrUUIDInvalid = fmt.Errorf("invalid uuid")
+
+ // Loggerf can be used to override the default logging destination. Such
+ // log messages in this library should be logged at warning or higher.
+ Loggerf = func(format string, args ...interface{}) {}
+)
+
+// UUID represents a UUID value. UUIDs can be compared and set to other values
+// and accessed by byte.
+type UUID [Size]byte
+
+// Generate creates a new, version 4 uuid.
+func Generate() (u UUID) {
+ const (
+ // ensures we back off for less than 450ms total. Use the following to
+ // select a new value, in units of 10ms:
+ // n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
+ maxretries = 9
+ backoff = time.Millisecond * 10
+ )
+
+ var (
+ totalBackoff time.Duration
+ count int
+ retries int
+ )
+
+ for {
+ // This should never block but the read may fail. Because of this,
+ // we just try to read the random number generator until we get
+ // something. This is a very rare condition but may happen.
+ b := time.Duration(retries) * backoff
+ time.Sleep(b)
+ totalBackoff += b
+
+ n, err := io.ReadFull(rand.Reader, u[count:])
+ if err != nil {
+ if retryOnError(err) && retries < maxretries {
+ count += n
+ retries++
+ Loggerf("error generating version 4 uuid, retrying: %v", err)
+ continue
+ }
+
+ // Any other errors represent a system problem. What did someone
+ // do to /dev/urandom?
+ panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+ }
+
+ break
+ }
+
+ u[6] = (u[6] & 0x0f) | 0x40 // set version byte
+ u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
+
+ return u
+}
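+
+// A hedged round-trip sketch:
+//
+//	u := Generate()
+//	parsed, err := Parse(u.String())
+//	// parsed == u when err is nil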
+
+// Parse attempts to extract a uuid from the string or returns an error.
+func Parse(s string) (u UUID, err error) {
+ if len(s) != 36 {
+ return UUID{}, ErrUUIDInvalid
+ }
+
+ // create stack addresses for each section of the uuid.
+ p := make([][]byte, 5)
+
+ if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
+ return u, err
+ }
+
+ copy(u[0:4], p[0])
+ copy(u[4:6], p[1])
+ copy(u[6:8], p[2])
+ copy(u[8:10], p[3])
+ copy(u[10:16], p[4])
+
+ return
+}
+
+func (u UUID) String() string {
+ return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// retryOnError tries to detect whether or not retrying would be fruitful.
+func retryOnError(err error) bool {
+ switch err := err.(type) {
+ case *os.PathError:
+ return retryOnError(err.Err) // unpack the target error
+ case syscall.Errno:
+ if err == syscall.EPERM {
+ // EPERM represents an entropy pool exhaustion, a condition under
+ // which we backoff and retry.
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf
new file mode 100644
index 000000000..d67edd779
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor.conf
@@ -0,0 +1,43 @@
+github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
+github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
+github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
+github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
+github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
+github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
+github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
+github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
+github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
+github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
+github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
+github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
+github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
+github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
+github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
+github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
+github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
+github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
+github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
+github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
+github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
+github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
+github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
+github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
+github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
+github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
+github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
+github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
+golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
+golang.org/x/net 4876518f9e71663000c348837735820161a42df7
+golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
+google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
+google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
+google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
+gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
+gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
+gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
+rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 000000000..1ea555e2a
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/README.md b/vendor/github.com/docker/docker-credential-helpers/README.md
new file mode 100644
index 000000000..f9cbc3fb5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/README.md
@@ -0,0 +1,82 @@
+## Introduction
+
+docker-credential-helpers is a suite of programs to use native stores to keep Docker credentials safe.
+
+## Installation
+
+Go to the [Releases](https://github.com/docker/docker-credential-helpers/releases) page and download the binary that works best for you. Put that binary in your `$PATH` so Docker can find it.
+
+### Building from scratch
+
+The programs in this repository are written in the Go programming language. These instructions assume prior knowledge of the language and that you have it installed on your machine.
+
+1 - Download the source and put it in your `$GOPATH` with `go get`.
+
+```
+$ go get github.com/docker/docker-credential-helpers
+```
+
+2 - Use `make` to build the program you want. That will leave the executable in the `bin` directory inside the repository.
+
+```
+$ cd $GOPATH/src/github.com/docker/docker-credential-helpers
+$ make osxkeychain
+```
+
+3 - Put that binary in your `$PATH`, so Docker can find it.
+
+## Usage
+
+### With the Docker Engine
+
+Set the `credsStore` option in your `.docker/config.json` file with the suffix of the program you want to use. For instance, set it to `osxkeychain` if you want to use `docker-credential-osxkeychain`.
+
+```json
+{
+ "credsStore": "osxkeychain"
+}
+```
+
+### With other command line applications
+
+The sub-package [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) includes
+functions to call external programs from your own command line applications.
+
+There are three things you need to know if you need to interact with a helper:
+
+1. The name of the program to execute, for instance `docker-credential-osxkeychain`.
+2. The server address to identify the credentials, for instance `https://example.com`.
+3. The username and secret to store, when you want to store credentials.
+
+You can see examples of each function in the [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) documentation.
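+
+For instance, here is a hedged sketch of driving a helper from Go (the helper
+name and server URL below are placeholders):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/docker/docker-credential-helpers/client"
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+func main() {
+ // The helper binary must be in $PATH.
+ p := client.NewShellProgramFunc("docker-credential-osxkeychain")
+
+ c := &credentials.Credentials{
+  ServerURL: "https://example.com",
+  Username:  "david",
+  Secret:    "topsecret",
+ }
+ if err := client.Store(p, c); err != nil {
+  fmt.Println(err)
+  return
+ }
+
+ got, err := client.Get(p, "https://example.com")
+ if err != nil {
+  fmt.Println(err)
+  return
+ }
+ fmt.Println(got.Username)
+}
+```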
+
+### Available programs
+
+1. osxkeychain: Provides a helper to use the OS X keychain as credentials store.
+2. secretservice: Provides a helper to use the D-Bus secret service as credentials store.
+3. wincred: Provides a helper to use Windows credentials manager as store.
+4. pass: Provides a helper to use `pass` as credentials store.
+
+#### Note
+
+`pass` needs to be configured for `docker-credential-pass` to work properly.
+It must be initialized with a `gpg2` key ID. Make sure your GPG key exists in the `gpg2` keyring, as `pass` uses `gpg2` instead of the regular `gpg`.
+
+## Development
+
+A credential helper can be any program that can read values from the standard input. We use the first argument in the command line to differentiate the kind of command to execute. There are four valid values:
+
+- `store`: Adds credentials to the keychain. The payload in the standard input is a JSON document with `ServerURL`, `Username` and `Secret`.
+- `get`: Retrieves credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `erase`: Removes credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `list`: Lists stored credentials. There is no standard input payload.
+
+This repository also includes libraries to implement new credentials programs in Go. Adding a new helper program is pretty easy. You can see how the OS X keychain helper works in the [osxkeychain](osxkeychain) directory.
+
+1. Implement the interface `credentials.Helper` in `YOUR_PACKAGE/YOUR_PACKAGE_$GOOS.go`
+2. Create a main program in `YOUR_PACKAGE/cmd/main_$GOOS.go`.
+3. Add make tasks to build your program and run tests.
+
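+A minimal sketch of such a helper follows, assuming the `credentials.Helper`
+interface exposes the `Add`, `Delete`, `Get` and `List` methods used by the
+command handlers in the `credentials` package:
+
+```go
+package main
+
+import "github.com/docker/docker-credential-helpers/credentials"
+
+// memoryHelper is a toy, non-persistent store for illustration only.
+type memoryHelper struct{ creds map[string]*credentials.Credentials }
+
+func (m *memoryHelper) Add(c *credentials.Credentials) error {
+ m.creds[c.ServerURL] = c
+ return nil
+}
+
+func (m *memoryHelper) Delete(serverURL string) error {
+ delete(m.creds, serverURL)
+ return nil
+}
+
+func (m *memoryHelper) Get(serverURL string) (string, string, error) {
+ if c, ok := m.creds[serverURL]; ok {
+  return c.Username, c.Secret, nil
+ }
+ return "", "", credentials.NewErrCredentialsNotFound()
+}
+
+func (m *memoryHelper) List() (map[string]string, error) {
+ urls := make(map[string]string, len(m.creds))
+ for url, c := range m.creds {
+  urls[url] = c.Username
+ }
+ return urls, nil
+}
+
+func main() {
+ credentials.Serve(&memoryHelper{creds: map[string]*credentials.Credentials{}})
+}
+```
+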
+## License
+
+MIT. See [LICENSE](LICENSE) for more information.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 000000000..d1d0434cb
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks whether 'msg' is an invalid-credentials error message.
+// It returns nil when the message is not such an error, or the corresponding error
+// otherwise: errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+ if credentials.IsCredentialsMissingServerURLMessage(msg) {
+ return credentials.NewErrCredentialsMissingServerURL()
+ }
+
+ if credentials.IsCredentialsMissingUsernameMessage(msg) {
+ return credentials.NewErrCredentialsMissingUsername()
+ }
+
+ return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+ cmd := program("store")
+
+ buffer := new(bytes.Buffer)
+ if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+ return err
+ }
+ cmd.Input(buffer)
+
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
+ cmd := program("get")
+ cmd.Input(strings.NewReader(serverURL))
+
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if credentials.IsErrCredentialsNotFoundMessage(t) {
+ return nil, credentials.NewErrCredentialsNotFound()
+ }
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+ }
+
+ resp := &credentials.Credentials{
+ ServerURL: serverURL,
+ }
+
+ if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// Erase executes a program to remove the server credentials from the native store.
+func Erase(program ProgramFunc, serverURL string) error {
+ cmd := program("erase")
+ cmd.Input(strings.NewReader(serverURL))
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ return nil
+}
+
+// List executes a program to list server credentials in the native store.
+func List(program ProgramFunc) (map[string]string, error) {
+ cmd := program("list")
+ cmd.Input(strings.NewReader("unused"))
+ out, err := cmd.Output()
+ if err != nil {
+ t := strings.TrimSpace(string(out))
+
+ if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+ err = isValidErr
+ }
+
+ return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
+ }
+
+ var resp map[string]string
+ if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go
new file mode 100644
index 000000000..8da334306
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+)
+
+// Program is an interface to execute external programs.
+type Program interface {
+ Output() ([]byte, error)
+ Input(in io.Reader)
+}
+
+// ProgramFunc is a type of function that initializes programs based on arguments.
+type ProgramFunc func(args ...string) Program
+
+// NewShellProgramFunc creates programs that are executed in a Shell.
+func NewShellProgramFunc(name string) ProgramFunc {
+ return NewShellProgramFuncWithEnv(name, nil)
+}
+
+// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables.
+func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
+ return func(args ...string) Program {
+ return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
+ }
+}
+
+func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
+ programCmd := exec.Command(commandName, args...)
+ programCmd.Env = os.Environ()
+ if env != nil {
+ for k, v := range *env {
+ programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
+ }
+ }
+ programCmd.Stderr = os.Stderr
+ return programCmd
+}
+
+// Shell invokes shell commands to talk with a remote credentials helper.
+type Shell struct {
+ cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *Shell) Output() ([]byte, error) {
+ return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+ s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 000000000..da8b594e7
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+ ServerURL string
+ Username string
+ Secret string
+}
+
+// isValid checks the integrity of a Credentials object, ensuring that it lacks
+// neither a server URL nor a username.
+// It returns whether the credentials are valid and, if they aren't, the error:
+// either errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+ if len(c.ServerURL) == 0 {
+ return false, NewErrCredentialsMissingServerURL()
+ }
+
+ if len(c.Username) == 0 {
+ return false, NewErrCredentialsMissingUsername()
+ }
+
+ return true, nil
+}
+
+// CredsLabel holds the label applied to Docker credentials in credentials stores
+// that allow labelling. That label makes it possible to filter out non-Docker
+// credentials at lookup/search time in the macOS keychain, the Windows credentials
+// manager and Linux libsecret. The default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+ CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+ var err error
+ if len(os.Args) != 2 {
+ err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+ }
+
+ if err == nil {
+ err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stdout, "%v\n", err)
+ os.Exit(1)
+ }
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+ switch key {
+ case "store":
+ return Store(helper, in)
+ case "get":
+ return Get(helper, in, out)
+ case "erase":
+ return Erase(helper, in)
+ case "list":
+ return List(helper, out)
+ case "version":
+ return PrintVersion(out)
+ }
+ return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ var creds Credentials
+ if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+ return err
+ }
+
+ if ok, err := creds.isValid(); !ok {
+ return err
+ }
+
+ return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server url.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ serverURL := strings.TrimSpace(buffer.String())
+ if len(serverURL) == 0 {
+ return NewErrCredentialsMissingServerURL()
+ }
+
+ username, secret, err := helper.Get(serverURL)
+ if err != nil {
+ return err
+ }
+
+ resp := Credentials{
+ ServerURL: serverURL,
+ Username: username,
+ Secret: secret,
+ }
+
+ buffer.Reset()
+ if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+ return err
+ }
+
+ fmt.Fprint(writer, buffer.String())
+ return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
+func Erase(helper Helper, reader io.Reader) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ serverURL := strings.TrimSpace(buffer.String())
+ if len(serverURL) == 0 {
+ return NewErrCredentialsMissingServerURL()
+ }
+
+ return helper.Delete(serverURL)
+}
+
+// List writes the JSON serialization of all server URLs
+// stored in the OS store, with their associated usernames.
+func List(helper Helper, writer io.Writer) error {
+ accts, err := helper.List()
+ if err != nil {
+ return err
+ }
+ return json.NewEncoder(writer).Encode(accts)
+}
+
+// PrintVersion outputs the current version.
+func PrintVersion(writer io.Writer) error {
+ fmt.Fprintln(writer, Version)
+ return nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
new file mode 100644
index 000000000..fe6a5aef4
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -0,0 +1,102 @@
+package credentials
+
+const (
+ // errCredentialsNotFoundMessage standardizes the not-found error, so every helper
+ // returns the same message and docker can handle it properly.
+ errCredentialsNotFoundMessage = "credentials not found in native keychain"
+
+ // errCredentialsMissingServerURLMessage and errCredentialsMissingUsernameMessage
+ // standardize the messages for invalid credentials and credential-management operations.
+ errCredentialsMissingServerURLMessage = "no credentials server URL"
+ errCredentialsMissingUsernameMessage = "no credentials username"
+)
+
+// errCredentialsNotFound represents an error
+// raised when credentials are not in the store.
+type errCredentialsNotFound struct{}
+
+// Error returns the standard error message
+// for when the credentials are not in the store.
+func (errCredentialsNotFound) Error() string {
+ return errCredentialsNotFoundMessage
+}
+
+// NewErrCredentialsNotFound creates a new error
+// for when the credentials are not in the store.
+func NewErrCredentialsNotFound() error {
+ return errCredentialsNotFound{}
+}
+
+// IsErrCredentialsNotFound returns true if the error
+// was caused by not having a set of credentials in a store.
+func IsErrCredentialsNotFound(err error) bool {
+ _, ok := err.(errCredentialsNotFound)
+ return ok
+}
+
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+//
+// This function helps to check messages returned by an
+// external program via its standard output.
+func IsErrCredentialsNotFoundMessage(err string) bool {
+ return err == errCredentialsNotFoundMessage
+}
+
+// errCredentialsMissingServerURL represents an error raised
+// when the credentials object has no server URL or when no
+// server URL is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingServerURL struct{}
+
+func (errCredentialsMissingServerURL) Error() string {
+ return errCredentialsMissingServerURLMessage
+}
+
+// errCredentialsMissingUsername represents an error raised
+// when the credentials object has no username or when no
+// username is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingUsername struct{}
+
+func (errCredentialsMissingUsername) Error() string {
+ return errCredentialsMissingUsernameMessage
+}
+
+// NewErrCredentialsMissingServerURL creates a new error for
+// errCredentialsMissingServerURL.
+func NewErrCredentialsMissingServerURL() error {
+ return errCredentialsMissingServerURL{}
+}
+
+// NewErrCredentialsMissingUsername creates a new error for
+// errCredentialsMissingUsername.
+func NewErrCredentialsMissingUsername() error {
+ return errCredentialsMissingUsername{}
+}
+
+// IsCredentialsMissingServerURL returns true if the error
+// was an errCredentialsMissingServerURL.
+func IsCredentialsMissingServerURL(err error) bool {
+ _, ok := err.(errCredentialsMissingServerURL)
+ return ok
+}
+
+// IsCredentialsMissingServerURLMessage checks for an
+// errCredentialsMissingServerURL in the error message.
+func IsCredentialsMissingServerURLMessage(err string) bool {
+ return err == errCredentialsMissingServerURLMessage
+}
+
+// IsCredentialsMissingUsername returns true if the error
+// was an errCredentialsMissingUsername.
+func IsCredentialsMissingUsername(err error) bool {
+ _, ok := err.(errCredentialsMissingUsername)
+ return ok
+}
+
+// IsCredentialsMissingUsernameMessage checks for an
+// errCredentialsMissingUsername in the error message.
+func IsCredentialsMissingUsernameMessage(err string) bool {
+ return err == errCredentialsMissingUsernameMessage
+}
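
A short sketch of how a caller distinguishes these errors, both as typed values in-process and as raw messages read from an external helper's standard output. The lookup function is a hypothetical stand-in.

    package main

    import (
        "fmt"

        "github.com/docker/docker-credential-helpers/credentials"
    )

    // lookup stands in for any code path that returns these errors.
    func lookup(serverURL string) error {
        if serverURL == "" {
            return credentials.NewErrCredentialsMissingServerURL()
        }
        return credentials.NewErrCredentialsNotFound()
    }

    func main() {
        err := lookup("https://registry.example.com")
        switch {
        case credentials.IsErrCredentialsNotFound(err):
            fmt.Println("no stored credentials; prompt the user")
        case credentials.IsCredentialsMissingServerURL(err):
            fmt.Println("caller forgot the server URL")
        default:
            fmt.Println("unexpected:", err)
        }

        // When the helper runs as a separate binary, only its stdout text
        // survives, so the message-based checks are used instead:
        if credentials.IsErrCredentialsNotFoundMessage("credentials not found in native keychain") {
            fmt.Println("same condition, detected from the message string")
        }
    }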
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
new file mode 100644
index 000000000..135acd254
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
@@ -0,0 +1,14 @@
+package credentials
+
+// Helper is the interface a credentials store helper must implement.
+type Helper interface {
+ // Add appends credentials to the store.
+ Add(*Credentials) error
+ // Delete removes credentials from the store.
+ Delete(serverURL string) error
+ // Get retrieves credentials from the store.
+ // It returns username and secret as strings.
+ Get(serverURL string) (string, string, error)
+ // List returns the stored serverURLs and their associated usernames.
+ List() (map[string]string, error)
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
new file mode 100644
index 000000000..033a5fee5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -0,0 +1,4 @@
+package credentials
+
+// Version holds a string describing the current version
+const Version = "0.6.0"
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
new file mode 100644
index 000000000..f84d61ee5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
@@ -0,0 +1,228 @@
+#include "osxkeychain_darwin.h"
+#include <CoreFoundation/CoreFoundation.h>
+#include <Foundation/NSValue.h>
+#include <stdio.h>
+#include <string.h>
+
+char *get_error(OSStatus status) {
+ char *buf = malloc(128);
+ CFStringRef str = SecCopyErrorMessageString(status, NULL);
+ int success = str != NULL && CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8);
+ if (!success) {
+ strncpy(buf, "Unknown error", 128);
+ }
+ // Release the CFString and guard against a NULL description so unknown
+ // status codes neither crash nor leak.
+ if (str != NULL) {
+ CFRelease(str);
+ }
+ return buf;
+}
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret) {
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainAddInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ strlen(username), username,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ strlen(secret), secret,
+ &item
+ );
+
+ if (status) {
+ return get_error(status);
+ }
+
+ SecKeychainAttribute attribute;
+ SecKeychainAttributeList attrs;
+ attribute.tag = kSecLabelItemAttr;
+ attribute.data = label;
+ attribute.length = strlen(label);
+ attrs.count = 1;
+ attrs.attr = &attribute;
+
+ status = SecKeychainItemModifyContent(item, &attrs, 0, NULL);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ return NULL;
+}
+
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) {
+ char *tmp;
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainFindInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ 0, NULL,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ secret_l, (void **)&tmp,
+ &item);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ *secret = strdup(tmp);
+ SecKeychainItemFreeContent(NULL, tmp);
+
+ SecKeychainAttributeList list;
+ SecKeychainAttribute attr;
+
+ list.count = 1;
+ list.attr = &attr;
+ attr.tag = kSecAccountItemAttr;
+
+ status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL);
+ if (status) {
+ return get_error(status);
+ }
+
+ *username = strdup(attr.data);
+ *username_l = attr.length;
+ SecKeychainItemFreeContent(&list, NULL);
+
+ return NULL;
+}
+
+char *keychain_delete(struct Server *server) {
+ SecKeychainItemRef item;
+
+ OSStatus status = SecKeychainFindInternetPassword(
+ NULL,
+ strlen(server->host), server->host,
+ 0, NULL,
+ 0, NULL,
+ strlen(server->path), server->path,
+ server->port,
+ server->proto,
+ kSecAuthenticationTypeDefault,
+ 0, NULL,
+ &item);
+
+ if (status) {
+ return get_error(status);
+ }
+
+ status = SecKeychainItemDelete(item);
+ if (status) {
+ return get_error(status);
+ }
+ return NULL;
+}
+
+char * CFStringToCharArr(CFStringRef aString) {
+ if (aString == NULL) {
+ return NULL;
+ }
+ CFIndex length = CFStringGetLength(aString);
+ CFIndex maxSize =
+ CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
+ char *buffer = (char *)malloc(maxSize);
+ if (CFStringGetCString(aString, buffer, maxSize,
+ kCFStringEncodingUTF8)) {
+ return buffer;
+ }
+ return NULL;
+}
+
+char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) {
+ CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8);
+ // Capacity 0 lets the dictionary grow; four values are added below.
+ CFMutableDictionaryRef query = CFDictionaryCreateMutable (NULL, 0, NULL, NULL);
+ CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword);
+ CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue);
+ CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll);
+ CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF);
+ // Search the keychain with this query dictionary.
+ CFTypeRef result = NULL;
+ OSStatus status = SecItemCopyMatching(
+ query,
+ &result);
+
+ CFRelease(credsLabelCF);
+
+ // The search has run and its matches are stored in result.
+ if (status) {
+ return get_error(status);
+ }
+ CFIndex numKeys = CFArrayGetCount(result);
+ *paths = (char **) malloc((int)sizeof(char *)*numKeys);
+ *accts = (char **) malloc((int)sizeof(char *)*numKeys);
+ // result is of type CFArray.
+ for(CFIndex i=0; i<numKeys; i++) {
+ CFDictionaryRef currKey = CFArrayGetValueAtIndex(result,i);
+
+ CFStringRef protocolTmp = CFDictionaryGetValue(currKey, CFSTR("ptcl"));
+ if (protocolTmp != NULL) {
+ CFStringRef protocolStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), protocolTmp);
+ if (CFStringCompare(protocolStr, CFSTR("htps"), 0) == kCFCompareEqualTo) {
+ protocolTmp = CFSTR("https://");
+ }
+ else {
+ protocolTmp = CFSTR("http://");
+ }
+ CFRelease(protocolStr);
+ }
+ else {
+ char * path = "0";
+ char * acct = "0";
+ (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)));
+ memcpy((*paths)[i], path, sizeof(char)*(strlen(path)));
+ (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)));
+ memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)));
+ continue;
+ }
+
+ CFMutableStringRef str = CFStringCreateMutableCopy(NULL, 0, protocolTmp);
+ CFStringRef serverTmp = CFDictionaryGetValue(currKey, CFSTR("srvr"));
+ if (serverTmp != NULL) {
+ CFStringAppend(str, serverTmp);
+ }
+
+ CFStringRef pathTmp = CFDictionaryGetValue(currKey, CFSTR("path"));
+ if (pathTmp != NULL) {
+ CFStringAppend(str, pathTmp);
+ }
+
+ const NSNumber * portTmp = CFDictionaryGetValue(currKey, CFSTR("port"));
+ if (portTmp != NULL && portTmp.integerValue != 0) {
+ CFStringRef portStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), portTmp);
+ CFStringAppend(str, CFSTR(":"));
+ CFStringAppend(str, portStr);
+ CFRelease(portStr);
+ }
+
+ CFStringRef acctTmp = CFDictionaryGetValue(currKey, CFSTR("acct"));
+ if (acctTmp == NULL) {
+ acctTmp = CFSTR("account not defined");
+ }
+
+ char * path = CFStringToCharArr(str);
+ char * acct = CFStringToCharArr(acctTmp);
+
+ // We now have all we need, the server name and the account. Export them to the Go side.
+ (*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
+ memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
+ (*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
+ memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
+
+ CFRelease(str);
+ }
+ *list_l = (int)numKeys;
+ return NULL;
+}
+
+void freeListData(char *** data, unsigned int length) {
+ for(int i=0; i<length; i++) {
+ free((*data)[i]);
+ }
+ free(*data);
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
new file mode 100644
index 000000000..439126761
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.go
@@ -0,0 +1,196 @@
+package osxkeychain
+
+/*
+#cgo CFLAGS: -x objective-c -mmacosx-version-min=10.10
+#cgo LDFLAGS: -framework Security -framework Foundation -mmacosx-version-min=10.10
+
+#include "osxkeychain_darwin.h"
+#include <stdlib.h>
+*/
+import "C"
+import (
+ "errors"
+ "net/url"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// errCredentialsNotFound is the specific error message returned by OS X
+// when the credentials are not in the keychain.
+const errCredentialsNotFound = "The specified item could not be found in the keychain."
+
+// Osxkeychain handles secrets using the OS X Keychain as a store.
+type Osxkeychain struct{}
+
+// Add adds new credentials to the keychain.
+func (h Osxkeychain) Add(creds *credentials.Credentials) error {
+ h.Delete(creds.ServerURL)
+
+ s, err := splitServer(creds.ServerURL)
+ if err != nil {
+ return err
+ }
+ defer freeServer(s)
+
+ label := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(label))
+ username := C.CString(creds.Username)
+ defer C.free(unsafe.Pointer(username))
+ secret := C.CString(creds.Secret)
+ defer C.free(unsafe.Pointer(secret))
+
+ errMsg := C.keychain_add(s, label, username, secret)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ return errors.New(C.GoString(errMsg))
+ }
+
+ return nil
+}
+
+// Delete removes credentials from the keychain.
+func (h Osxkeychain) Delete(serverURL string) error {
+ s, err := splitServer(serverURL)
+ if err != nil {
+ return err
+ }
+ defer freeServer(s)
+
+ errMsg := C.keychain_delete(s)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ return errors.New(C.GoString(errMsg))
+ }
+
+ return nil
+}
+
+// Get returns the username and secret to use for a given registry server URL.
+func (h Osxkeychain) Get(serverURL string) (string, string, error) {
+ s, err := splitServer(serverURL)
+ if err != nil {
+ return "", "", err
+ }
+ defer freeServer(s)
+
+ var usernameLen C.uint
+ var username *C.char
+ var secretLen C.uint
+ var secret *C.char
+ defer C.free(unsafe.Pointer(username))
+ defer C.free(unsafe.Pointer(secret))
+
+ errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ goMsg := C.GoString(errMsg)
+ if goMsg == errCredentialsNotFound {
+ return "", "", credentials.NewErrCredentialsNotFound()
+ }
+
+ return "", "", errors.New(goMsg)
+ }
+
+ user := C.GoStringN(username, C.int(usernameLen))
+ pass := C.GoStringN(secret, C.int(secretLen))
+ return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames.
+func (h Osxkeychain) List() (map[string]string, error) {
+ credsLabelC := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabelC))
+
+ var pathsC **C.char
+ defer C.free(unsafe.Pointer(pathsC))
+ var acctsC **C.char
+ defer C.free(unsafe.Pointer(acctsC))
+ var listLenC C.uint
+ errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC)
+ if errMsg != nil {
+ defer C.free(unsafe.Pointer(errMsg))
+ goMsg := C.GoString(errMsg)
+ return nil, errors.New(goMsg)
+ }
+
+ defer C.freeListData(&pathsC, listLenC)
+ defer C.freeListData(&acctsC, listLenC)
+
+ listLen := int(listLenC)
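+ // Convert the C arrays to Go slices without copying: cast each to a
+ // pointer to a maximally-sized array, then slice it down to listLen.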
+ pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+ acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+ // Copy the C strings into Go strings, skipping placeholder entries that are irrelevant to the credentials helper.
+ resp := make(map[string]string)
+ for i := 0; i < listLen; i++ {
+ if C.GoString(pathTmp[i]) == "0" {
+ continue
+ }
+ resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+ }
+ return resp, nil
+}
+
+func splitServer(serverURL string) (*C.struct_Server, error) {
+ u, err := parseURL(serverURL)
+ if err != nil {
+ return nil, err
+ }
+
+ proto := C.kSecProtocolTypeHTTPS
+ if u.Scheme == "http" {
+ proto = C.kSecProtocolTypeHTTP
+ }
+ var port int
+ p := getPort(u)
+ if p != "" {
+ port, err = strconv.Atoi(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &C.struct_Server{
+ proto: C.SecProtocolType(proto),
+ host: C.CString(getHostname(u)),
+ port: C.uint(port),
+ path: C.CString(u.Path),
+ }, nil
+}
+
+func freeServer(s *C.struct_Server) {
+ C.free(unsafe.Pointer(s.host))
+ C.free(unsafe.Pointer(s.path))
+}
+
+// parseURL parses and validates a given serverURL to an url.URL, and
+// returns an error if validation failed. Querystring parameters are
+// omitted in the resulting URL, because they are not used in the helper.
+//
+// If serverURL does not have a valid scheme, `//` is used as scheme
+// before parsing. This prevents the hostname being used as path,
+// and the credentials being stored without host.
+func parseURL(serverURL string) (*url.URL, error) {
+ // Check if serverURL has a scheme, otherwise add `//` as scheme.
+ if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") {
+ serverURL = "//" + serverURL
+ }
+
+ u, err := url.Parse(serverURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" {
+ return nil, errors.New("unsupported scheme: " + u.Scheme)
+ }
+ if getHostname(u) == "" {
+ return nil, errors.New("no hostname in URL")
+ }
+
+ u.RawQuery = ""
+ return u, nil
+}
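
A package-internal sketch (illustrative only, since parseURL is unexported) of how parseURL normalizes typical inputs:

    package osxkeychain

    import "fmt"

    func parseURLExamples() {
        u, _ := parseURL("registry.example.com") // no scheme: "//" is prepended
        fmt.Println(u.Host)                      // registry.example.com (not a path)

        u, _ = parseURL("https://registry.example.com:5000/v1?debug=1")
        fmt.Println(u.String()) // https://registry.example.com:5000/v1 (query dropped)

        _, err := parseURL("ftp://registry.example.com")
        fmt.Println(err) // unsupported scheme: ftp
    }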
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
new file mode 100644
index 000000000..c54e7d728
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
@@ -0,0 +1,14 @@
+#include <Security/Security.h>
+
+struct Server {
+ SecProtocolType proto;
+ char *host;
+ char *path;
+ unsigned int port;
+};
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret);
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret);
+char *keychain_delete(struct Server *server);
+char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length); \ No newline at end of file
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
new file mode 100644
index 000000000..0b7297d2f
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
@@ -0,0 +1,13 @@
+//+build go1.8
+
+package osxkeychain
+
+import "net/url"
+
+func getHostname(u *url.URL) string {
+ return u.Hostname()
+}
+
+func getPort(u *url.URL) string {
+ return u.Port()
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
new file mode 100644
index 000000000..bdf9b7b00
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
@@ -0,0 +1,41 @@
+//+build !go1.8
+
+package osxkeychain
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getHostname(u *url.URL) string {
+ return stripPort(u.Host)
+}
+
+func getPort(u *url.URL) string {
+ return portOnly(u.Host)
+}
+
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
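
These fallbacks mirror url.URL.Hostname and url.URL.Port from Go 1.8 for older toolchains. A package-internal sketch of how they handle IPv6 literals:

    package osxkeychain

    import "fmt"

    func hostportExamples() {
        fmt.Println(stripPort("example.com:5000")) // example.com
        fmt.Println(stripPort("[::1]:5000"))       // ::1
        fmt.Println(portOnly("example.com:5000"))  // 5000
        fmt.Println(portOnly("[::1]:5000"))        // 5000
        fmt.Println(portOnly("[::1]"))             // "" (bracketed host, no port)
    }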
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
new file mode 100644
index 000000000..35dea92da
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
@@ -0,0 +1,162 @@
+#include <string.h>
+#include <stdlib.h>
+#include "secretservice_linux.h"
+
+const SecretSchema *docker_get_schema(void)
+{
+ static const SecretSchema docker_schema = {
+ "io.docker.Credentials", SECRET_SCHEMA_NONE,
+ {
+ { "label", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "server", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "username", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "docker_cli", SECRET_SCHEMA_ATTRIBUTE_STRING },
+ { "NULL", 0 },
+ }
+ };
+ return &docker_schema;
+}
+
+GError *add(char *label, char *server, char *username, char *secret) {
+ GError *err = NULL;
+
+ secret_password_store_sync (DOCKER_SCHEMA, SECRET_COLLECTION_DEFAULT,
+ server, secret, NULL, &err,
+ "label", label,
+ "server", server,
+ "username", username,
+ "docker_cli", "1",
+ NULL);
+ return err;
+}
+
+GError *delete(char *server) {
+ GError *err = NULL;
+
+ secret_password_clear_sync(DOCKER_SCHEMA, NULL, &err,
+ "server", server,
+ "docker_cli", "1",
+ NULL);
+ if (err != NULL)
+ return err;
+ return NULL;
+}
+
+char *get_attribute(const char *attribute, SecretItem *item) {
+ GHashTable *attributes;
+ GHashTableIter iter;
+ gchar *value, *key;
+
+ attributes = secret_item_get_attributes(item);
+ g_hash_table_iter_init(&iter, attributes);
+ while (g_hash_table_iter_next(&iter, (void **)&key, (void **)&value)) {
+ if (strncmp(key, attribute, strlen(key)) == 0)
+ return (char *)value;
+ }
+ g_hash_table_unref(attributes);
+ return NULL;
+}
+
+GError *get(char *server, char **username, char **secret) {
+ GError *err = NULL;
+ GHashTable *attributes;
+ SecretService *service;
+ GList *items, *l;
+ SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
+ SecretValue *secretValue;
+ gsize length;
+ gchar *value;
+
+ attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
+ g_hash_table_insert(attributes, g_strdup("server"), g_strdup(server));
+ g_hash_table_insert(attributes, g_strdup("docker_cli"), g_strdup("1"));
+
+ service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
+ if (err == NULL) {
+ items = secret_service_search_sync(service, DOCKER_SCHEMA, attributes, flags, NULL, &err);
+ if (err == NULL) {
+ for (l = items; l != NULL; l = g_list_next(l)) {
+ value = secret_item_get_schema_name(l->data);
+ if (strncmp(value, "io.docker.Credentials", strlen(value)) != 0) {
+ g_free(value);
+ continue;
+ }
+ g_free(value);
+ secretValue = secret_item_get_secret(l->data);
+ if (secret != NULL) {
+ *secret = strdup(secret_value_get(secretValue, &length));
+ secret_value_unref(secretValue);
+ }
+ *username = get_attribute("username", l->data);
+ }
+ g_list_free_full(items, g_object_unref);
+ }
+ g_object_unref(service);
+ }
+ g_hash_table_unref(attributes);
+ if (err != NULL) {
+ return err;
+ }
+ return NULL;
+}
+
+GError *list(char *ref_label, char *** paths, char *** accts, unsigned int *list_l) {
+ GList *items;
+ GError *err = NULL;
+ SecretService *service;
+ SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK;
+ GHashTable *attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
+
+ // List credentials with the right label only
+ g_hash_table_insert(attributes, g_strdup("label"), g_strdup(ref_label));
+
+ service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err);
+ if (err != NULL) {
+ return err;
+ }
+
+ items = secret_service_search_sync(service, NULL, attributes, flags, NULL, &err);
+ int numKeys = g_list_length(items);
+ if (err != NULL) {
+ return err;
+ }
+
+ char **tmp_paths = (char **) calloc(1,(int)sizeof(char *)*numKeys);
+ char **tmp_accts = (char **) calloc(1,(int)sizeof(char *)*numKeys);
+
+ // items now contains our keys from the gnome keyring
+ // we will now put it in our two lists to return it to go
+ GList *current;
+ int listNumber = 0;
+ for(current = items; current!=NULL; current = current->next) {
+ char *pathTmp = secret_item_get_label(current->data);
+ // you cannot have a key without a label in the gnome keyring
+ char *acctTmp = get_attribute("username",current->data);
+ if (acctTmp==NULL) {
+ acctTmp = "account not defined";
+ }
+
+ tmp_paths[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(pathTmp)+1));
+ tmp_accts[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(acctTmp)+1));
+
+ memcpy(tmp_paths[listNumber], pathTmp, sizeof(char)*(strlen(pathTmp)+1));
+ memcpy(tmp_accts[listNumber], acctTmp, sizeof(char)*(strlen(acctTmp)+1));
+
+ listNumber = listNumber + 1;
+ }
+
+ *paths = (char **) realloc(tmp_paths, (int)sizeof(char *)*listNumber);
+ *accts = (char **) realloc(tmp_accts, (int)sizeof(char *)*listNumber);
+
+ *list_l = listNumber;
+
+ return NULL;
+}
+
+void freeListData(char *** data, unsigned int length) {
+ int i;
+ for(i=0; i<length; i++) {
+ free((*data)[i]);
+ }
+ free(*data);
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
new file mode 100644
index 000000000..95a1310b6
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.go
@@ -0,0 +1,118 @@
+package secretservice
+
+/*
+#cgo pkg-config: libsecret-1
+
+#include "secretservice_linux.h"
+#include <stdlib.h>
+*/
+import "C"
+import (
+ "errors"
+ "unsafe"
+
+ "github.com/docker/docker-credential-helpers/credentials"
+)
+
+// Secretservice handles secrets using Linux secret-service as a store.
+type Secretservice struct{}
+
+// Add adds new credentials to the keychain.
+func (h Secretservice) Add(creds *credentials.Credentials) error {
+ if creds == nil {
+ return errors.New("missing credentials")
+ }
+ credsLabel := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabel))
+ server := C.CString(creds.ServerURL)
+ defer C.free(unsafe.Pointer(server))
+ username := C.CString(creds.Username)
+ defer C.free(unsafe.Pointer(username))
+ secret := C.CString(creds.Secret)
+ defer C.free(unsafe.Pointer(secret))
+
+ if err := C.add(credsLabel, server, username, secret); err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return errors.New(C.GoString(errMsg))
+ }
+ return nil
+}
+
+// Delete removes credentials from the store.
+func (h Secretservice) Delete(serverURL string) error {
+ if serverURL == "" {
+ return errors.New("missing server url")
+ }
+ server := C.CString(serverURL)
+ defer C.free(unsafe.Pointer(server))
+
+ if err := C.delete(server); err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return errors.New(C.GoString(errMsg))
+ }
+ return nil
+}
+
+// Get returns the username and secret to use for a given registry server URL.
+func (h Secretservice) Get(serverURL string) (string, string, error) {
+ if serverURL == "" {
+ return "", "", errors.New("missing server url")
+ }
+ var username *C.char
+ defer C.free(unsafe.Pointer(username))
+ var secret *C.char
+ defer C.free(unsafe.Pointer(secret))
+ server := C.CString(serverURL)
+ defer C.free(unsafe.Pointer(server))
+
+ err := C.get(server, &username, &secret)
+ if err != nil {
+ defer C.g_error_free(err)
+ errMsg := (*C.char)(unsafe.Pointer(err.message))
+ return "", "", errors.New(C.GoString(errMsg))
+ }
+ user := C.GoString(username)
+ pass := C.GoString(secret)
+ if pass == "" {
+ return "", "", credentials.NewErrCredentialsNotFound()
+ }
+ return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames for a given credentials label
+func (h Secretservice) List() (map[string]string, error) {
+ credsLabelC := C.CString(credentials.CredsLabel)
+ defer C.free(unsafe.Pointer(credsLabelC))
+
+ var pathsC **C.char
+ defer C.free(unsafe.Pointer(pathsC))
+ var acctsC **C.char
+ defer C.free(unsafe.Pointer(acctsC))
+ var listLenC C.uint
+ err := C.list(credsLabelC, &pathsC, &acctsC, &listLenC)
+ if err != nil {
+ defer C.g_error_free(err)
+ return nil, errors.New("error from the list function in secretservice_linux.c, most likely caused by a failure in the libsecret library")
+ }
+ defer C.freeListData(&pathsC, listLenC)
+ defer C.freeListData(&acctsC, listLenC)
+
+ resp := make(map[string]string)
+
+ listLen := int(listLenC)
+ if listLen == 0 {
+ return resp, nil
+ }
+ // The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible
+ // with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 bytes on a 32-bit system
+ // and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656
+ pathTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+ acctTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+ for i := 0; i < listLen; i++ {
+ resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
new file mode 100644
index 000000000..a28179db3
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
@@ -0,0 +1,13 @@
+#define SECRET_WITH_UNSTABLE 1
+#define SECRET_API_SUBJECT_TO_CHANGE 1
+#include <libsecret/secret.h>
+
+const SecretSchema *docker_get_schema(void) G_GNUC_CONST;
+
+#define DOCKER_SCHEMA docker_get_schema()
+
+GError *add(char *label, char *server, char *username, char *secret);
+GError *delete(char *server);
+GError *get(char *server, char **username, char **secret);
+GError *list(char *label, char *** paths, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length);
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 000000000..9c8e20ab8
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2017 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 000000000..0c74e15b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2017 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md
new file mode 100644
index 000000000..533d7717d
--- /dev/null
+++ b/vendor/github.com/docker/docker/README.md
@@ -0,0 +1,90 @@
+### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
+
+### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
+
+The Moby Project
+================
+
+![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
+
+Moby is an open-source project created by Docker to advance the software containerization movement.
+It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
+
+# Moby
+
+## Overview
+
+At the core of Moby is a framework to assemble specialized container systems.
+It provides:
+
+- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
+- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
+- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
+
+All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
+
+## Principles
+
+Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
+The guiding principles are:
+
+- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
+- Usable security: Moby will provide secure defaults without compromising usability.
+- Container centric: Moby is built with containers, for running containers.
+
+With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and then build and deploy it easily.
+
+Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
+
+## Audience
+
+Moby is recommended for anyone who wants to assemble a container-based system. This includes:
+
+- Hackers who want to customize or patch their Docker build
+- System engineers or integrators building a container system
+- Infrastructure providers looking to adapt existing container systems to their environment
+- Container enthusiasts who want to experiment with the latest container tech
+- Open-source developers looking to test their project in a variety of different systems
+- Anyone curious about Docker internals and how it’s built
+
+Moby is NOT recommended for:
+
+- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
+- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
+- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead.
+
+# Transitioning to Moby
+
+Docker is transitioning all of its open source collaborations to the Moby project going forward.
+During the transition, all open source activity should continue as usual.
+
+We are proposing the following list of changes:
+
+- splitting up the engine into more open components
+- removing the Docker UI, SDK, etc., to keep them in the Docker org
+- clarifying that the project is not limited to the engine, but covers the assembly of all the individual components of the Docker platform
+- open-sourcing new tools and components which we currently use to assemble the Docker product, but which could benefit the community
+- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
+
+-----
+
+Legal
+=====
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Moby may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+
+Licensing
+=========
+Moby is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
+license text.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 000000000..bb8813252
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is a useful check while you are making edits.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 000000000..6e462aeda
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,65 @@
+package api
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/system"
+ "github.com/docker/libtrust"
+)
+
+// Common constants for daemon and client.
+const (
+ // DefaultVersion of Current REST API
+ DefaultVersion string = "1.32"
+
+ // NoBaseImageSpecifier is the symbol used by the FROM
+ // command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch"
+)
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+ err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
+ if err != nil {
+ return nil, err
+ }
+ trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+ if err == libtrust.ErrKeyFileDoesNotExist {
+ trustKey, err = libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("Error generating key: %s", err)
+ }
+ encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
+ if err != nil {
+ return nil, fmt.Errorf("Error serializing key: %s", err)
+ }
+ if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
+ return nil, fmt.Errorf("Error saving key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
+ }
+ return trustKey, nil
+}
+
+func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
+ if ext == ".json" || ext == ".jwk" {
+ encoded, err = json.Marshal(key)
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encoded = pem.EncodeToMemory(pemBlock)
+ }
+ return
+}
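
A hedged usage sketch (the path is illustrative): the first call generates and persists a new EC P-256 key; later calls load the same file.

    package main

    import (
        "fmt"

        "github.com/docker/docker/api"
    )

    func main() {
        // A ".json" or ".jwk" extension stores the key as JWK; anything
        // else is written as PEM (see serializePrivateKey above).
        key, err := api.LoadOrCreateTrustKey("/tmp/trust/key.json")
        if err != nil {
            panic(err)
        }
        fmt.Println("trust key ID:", key.KeyID())
    }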
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
new file mode 100644
index 000000000..081e61c45
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package api
+
+// MinVersion represents Minimum REST API version supported
+const MinVersion string = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
new file mode 100644
index 000000000..a6268a4ff
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_windows.go
@@ -0,0 +1,8 @@
+package api
+
+// MinVersion represents Minimum REST API version supported
+// Technically the first daemon API version released on Windows is v1.25 in
+// engine version 1.13. However, some clients are explicitly using downlevel
+// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+// Hence also allowing 1.24 on Windows.
+const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/names.go b/vendor/github.com/docker/docker/api/names.go
new file mode 100644
index 000000000..f147d1f4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/names.go
@@ -0,0 +1,9 @@
+package api
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
+
+// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
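For illustration, validating proposed container names against the pattern (the sample names are assumptions):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api"
    )

    func main() {
        for _, name := range []string{"web_1", "-bad-start", "ok.name-2"} {
            // Names must start with an alphanumeric character and may then
            // contain characters from [a-zA-Z0-9_.-].
            fmt.Println(name, api.RestrictedNamePattern.MatchString(name))
        }
    }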
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
new file mode 100644
index 000000000..056af6b84
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/auth.go
@@ -0,0 +1,22 @@
+package types
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
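
As a sketch, the struct serializes with the lowercase field names shown in the tags, and empty fields are omitted; the values here are illustrative:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/docker/api/types"
    )

    func main() {
        auth := types.AuthConfig{
            Username:      "alice",
            Password:      "s3cret",
            ServerAddress: "registry.example.com",
        }
        buf, _ := json.Marshal(auth)
        fmt.Println(string(buf))
        // {"username":"alice","password":"s3cret","serveraddress":"registry.example.com"}
    }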
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
new file mode 100644
index 000000000..931ae10ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev
+
+import "fmt"
+
+// WeightDevice is a structure that holds device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
new file mode 100644
index 000000000..18a1263f1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -0,0 +1,389 @@
+package types
+
+import (
+ "bufio"
+ "io"
+ "net"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ units "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+ CheckpointID string
+ CheckpointDir string
+ Exit bool
+}
+
+// CheckpointListOptions holds parameters to list checkpoints for a container
+type CheckpointListOptions struct {
+ CheckpointDir string
+}
+
+// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
+type CheckpointDeleteOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+ Logs bool
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+ ExecID string
+ ContainerID string
+ Running bool
+ ExitCode int
+ Pid int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+ Quiet bool
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filters filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+ CopyUIDGID bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+ Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// CloseWriter is an interface implemented by types whose
+// input stream can be closed to prevent further writes.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
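+
+// Example (editor's sketch; the hijacked connection is assumed to come from
+// elsewhere, e.g. a container attach call): send input, half-close the write
+// side, then drain the remaining output.
+//
+//	resp := HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}
+//	io.WriteString(resp.Conn, "exit\n")
+//	resp.CloseWrite()               // no more input; peer can still send output
+//	io.Copy(os.Stdout, resp.Reader) // read until the peer closes
+//	resp.Close()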
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ NetworkMode string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*units.Ulimit
+ // BuildArgs needs to be a *string instead of just a string so that
+ // we can tell the difference between "" (empty string) and no value
+ // at all (nil). See the parsing of buildArgs in
+ // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string
+ AuthConfigs map[string]AuthConfig
+ Context io.Reader
+ Labels map[string]string
+ // Squash squashes the resulting image's layers into the parent: it
+ // preserves the original image and creates a new one from the parent
+ // with all the changes applied to a single layer
+ Squash bool
+ // CacheFrom specifies images that are used for matching cache. Images
+ // specified here do not need to have a valid parent chain to match cache.
+ CacheFrom []string
+ SecurityOpt []string
+ ExtraHosts []string // List of extra hosts
+ Target string
+ SessionID string
+
+ // TODO @jhowardmsft LCOW Support: This will require extending to include
+ // `Platform string`, but is omitted for now as it's hard-coded temporarily
+ // to avoid API changes.
+}
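+
+// Example (illustrative): because BuildArgs values are *string, a build arg
+// can be explicitly set to the empty string or left without a value (nil),
+// and the two remain distinguishable on the server side.
+//
+//	empty := ""
+//	opts := ImageBuildOptions{
+//	    Tags: []string{"example:latest"},
+//	    BuildArgs: map[string]*string{
+//	        "HTTP_PROXY": &empty, // explicitly set to ""
+//	        "VERSION":    nil,    // declared, but no value supplied
+//	    },
+//	}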
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+ SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+ All bool
+ Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base 64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
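+
+// Example (editor's sketch): supplying a RequestPrivilegeFunc so a pull can
+// be retried after an authorization error. promptAndEncodeAuth is a
+// hypothetical helper that asks the user for credentials and returns them in
+// the encoded form described for AuthConfig.
+//
+//	opts := ImagePullOptions{
+//	    RegistryAuth: encodedAuth, // assumed to be prepared by the caller
+//	    PrivilegeFunc: func() (string, error) {
+//	        return promptAndEncodeAuth() // hypothetical helper
+//	    },
+//	}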
+
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+ RegistryAuth string
+ PrivilegeFunc RequestPrivilegeFunc
+ Filters filters.Args
+ Limit int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct {
+ Height uint
+ Width uint
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+ Force bool
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+ // ID is the ID of the created service.
+ ID string
+ // Warnings is a set of non-fatal warning messages to pass on to the user.
+ Warnings []string `json:",omitempty"`
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+ RegistryAuthFromSpec = "spec"
+ RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+
+ // RegistryAuthFrom specifies where to find the registry authorization
+ // credentials if they are not given in EncodedRegistryAuth. Valid
+ // values are "spec" and "previous-spec".
+ RegistryAuthFrom string
+
+ // Rollback indicates whether a server-side rollback should be
+ // performed. When this is set, the provided spec will be ignored.
+ // The valid values are "previous" and "none". An empty value is the
+ // same as "none".
+ Rollback string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filters filters.Args
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+ InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filters filters.Args
+}
+
+// PluginRemoveOptions holds parameters to remove plugins.
+type PluginRemoveOptions struct {
+ Force bool
+}
+
+// PluginEnableOptions holds parameters to enable plugins.
+type PluginEnableOptions struct {
+ Timeout int
+}
+
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+ Force bool
+}
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+ Disabled bool
+ AcceptAllPermissions bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ RemoteRef string // RemoteRef is the plugin name on the registry
+ PrivilegeFunc RequestPrivilegeFunc
+ AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+ Args []string
+}
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// PluginCreateOptions hold all options to plugin create.
+type PluginCreateOptions struct {
+ RepoName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
new file mode 100644
index 000000000..e4d2ce6e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/configs.go
@@ -0,0 +1,70 @@
+package types
+
+import (
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+)
+
+// configs holds structs used for internal communication between the
+// frontend (such as an http server) and the backend (such as the
+// docker daemon).
+
+// ContainerCreateConfig is the parameter set to ContainerCreate()
+type ContainerCreateConfig struct {
+ Name string
+ Config *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+ AdjustCPUShares bool
+ Platform string
+}
+
+// ContainerRmConfig holds arguments for the container remove
+// operation. This struct is used to tell the backend what operations
+// to perform.
+type ContainerRmConfig struct {
+ ForceRemove, RemoveVolume, RemoveLink bool
+}
+
+// ContainerCommitConfig contains build configs for commit operation,
+// and is used when making a commit with the current state of the container.
+type ContainerCommitConfig struct {
+ Pause bool
+ Repo string
+ Tag string
+ Author string
+ Comment string
+ // merge container config into commit config before commit
+ MergeConfigs bool
+ Config *container.Config
+}
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecConfig struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Env []string // Environment variables
+ Cmd []string // Execution commands and args
+}
+
+// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct {
+ ForceRemove bool
+}
+
+// PluginEnableConfig holds arguments for plugin enable
+type PluginEnableConfig struct {
+ Timeout int
+}
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct {
+ ForceDisable bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 000000000..55a03fc98
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,69 @@
+package container
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// MinimumDuration puts a minimum on user-configured duration.
+// This is to prevent API errors on time units. For example, the API may
+// set 3 as the healthcheck interval, intending 3 seconds, but
+// Docker would interpret it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
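+
+// Example (illustrative): a CMD-SHELL healthcheck. The durations are
+// time.Duration values, so bare integers would be read as nanoseconds; see
+// MinimumDuration above.
+//
+//	hc := HealthConfig{
+//	    Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//	    Interval: 30 * time.Second,
+//	    Timeout:  5 * time.Second,
+//	    Retries:  3,
+//	}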
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container; also supports user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile
+ Labels map[string]string // List of labels set on this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
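+
+// Example (editor's sketch): a minimal portable container configuration.
+// Typically only the image is required; the other fields inherit defaults.
+//
+//	cfg := Config{
+//	    Image:  "ubuntu:16.04",
+//	    Cmd:    strslice.StrSlice{"echo", "hello"},
+//	    Env:    []string{"LANG=C.UTF-8"},
+//	    Labels: map[string]string{"com.example.owner": "team-a"},
+//	}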
diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go
new file mode 100644
index 000000000..767945a53
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerChangeResponseItem container change response item
+// swagger:model ContainerChangeResponseItem
+type ContainerChangeResponseItem struct {
+
+ // Kind of change
+ // Required: true
+ Kind uint8 `json:"Kind"`
+
+ // Path to file that has changed
+ // Required: true
+ Path string `json:"Path"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
new file mode 100644
index 000000000..c95023b81
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_create.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerCreateCreatedBody container create created body
+// swagger:model ContainerCreateCreatedBody
+type ContainerCreateCreatedBody struct {
+
+ // The ID of the created container
+ // Required: true
+ ID string `json:"Id"`
+
+ // Warnings encountered when creating the container
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 000000000..78bc37ee5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody container top o k body
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+ // Each process running in the container, where each process is an array of values corresponding to the titles
+ // Required: true
+ Processes [][]string `json:"Processes"`
+
+ // The ps column titles
+ // Required: true
+ Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 000000000..2339366fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody container update o k body
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+ // warnings
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
new file mode 100644
index 000000000..77ecdbaf7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerWaitOKBody container wait o k body
+// swagger:model ContainerWaitOKBody
+type ContainerWaitOKBody struct {
+
+ // Exit code of the container
+ // Required: true
+ StatusCode int64 `json:"StatusCode"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
new file mode 100644
index 000000000..9fea9eb04
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -0,0 +1,380 @@
+package container
+
+import (
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific
+type Isolation string
+
+// IsDefault indicates whether the container uses the default isolation technology. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack.
+func (n IpcMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's ipc stack.
+func (n IpcMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses another container's ipc stack.
+func (n IpcMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the ipc stack is valid.
+func (n IpcMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose ipc stack is going to be used.
+func (n IpcMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// IsNone indicates whether the container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsDefault indicates whether the container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// IsPrivate indicates whether the container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsContainer indicates whether the container uses another container's network stack.
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// ConnectedContainer returns the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UserDefined returns the name of the user-created network the container uses, or an empty string.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container's cgroup
+func (c CgroupSpec) IsContainer() bool {
+ parts := strings.SplitN(string(c), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+ return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+ parts := strings.SplitN(string(c), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses another container's pid namespace.
+func (n PidMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+ Name string
+ MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart if it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
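+
+// Example (illustrative): an on-failure policy that gives up after five
+// failed restarts.
+//
+//	rp := RestartPolicy{Name: "on-failure", MaximumRetryCount: 5}
+//	rp.IsOnFailure() // true
+//	rp.IsNone()      // false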
+
+// LogMode is a type to define the available modes for logging.
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+ LogModeUnset = ""
+ LogModeBlocking LogMode = "blocking"
+ LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+ NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+ CpusetCpus string // CpusetCpus 0-2, 0,1
+ CpusetMems string // CpusetMems 0-2, 0,1
+ Devices []DeviceMapping // List of devices to map inside the container
+ DeviceCgroupRules []string // List of rules to be added to the device cgroup
+ DiskQuota int64 // Disk limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit int64 // Setting pids limit for a container
+ Ulimits []*units.Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+ VolumesFrom []string // List of volumes to take from other container
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ DNS []string `json:"Dns"` // List of DNS server to lookup
+ DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
+ DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+ PublishAllPorts bool // Should docker publish all exposed ports for the container
+ ReadonlyRootfs bool // Is the container root filesystem read-only
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ ConsoleSize [2]uint // Initial console size (height,width)
+ Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+
+ // Mounts specs used by the container
+ Mounts []mount.Mount `json:",omitempty"`
+
+ // Run a custom init inside the container, if null, use the daemon's configured settings
+ Init *bool `json:",omitempty"`
+}
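+
+// Example (editor's sketch): a host configuration that sets a restart policy
+// and, via the embedded Resources struct, a memory limit.
+//
+//	hc := HostConfig{
+//	    NetworkMode:   "bridge",
+//	    RestartPolicy: RestartPolicy{Name: "unless-stopped"},
+//	    Resources:     Resources{Memory: 512 * 1024 * 1024}, // 512 MiB
+//	}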
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 000000000..2d664d1c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+package container
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsBridge() {
+ return "bridge"
+ } else if n.IsHost() {
+ return "host"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsDefault() {
+ return "default"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+ return ""
+}
+
+// IsBridge indicates whether the container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsUserDefined indicates whether the container uses a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 000000000..469923f7e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+ "strings"
+)
+
+// IsBridge indicates whether the container uses the bridge network stack;
+// on Windows it is given the name "nat"
+func (n NetworkMode) IsBridge() bool {
+ return n == "nat"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// It returns false as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsUserDefined indicates whether the container uses a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsDefault() {
+ return "default"
+ } else if n.IsBridge() {
+ return "nat"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
new file mode 100644
index 000000000..64820fe35
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
@@ -0,0 +1,22 @@
+package container
+
+// WaitCondition is a type used to specify a container state for which
+// to wait.
+type WaitCondition string
+
+// Possible WaitCondition Values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+ WaitConditionNotRunning WaitCondition = "not-running"
+ WaitConditionNextExit WaitCondition = "next-exit"
+ WaitConditionRemoved WaitCondition = "removed"
+)
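+
+// Example (editor's sketch; assumes the Engine API client, which is not part
+// of this package): waiting until a container leaves the running state.
+//
+//	bodyCh, errCh := cli.ContainerWait(ctx, containerID, WaitConditionNotRunning)
+//	select {
+//	case body := <-bodyCh:
+//	    _ = body.StatusCode // the container's exit code
+//	case err := <-errCh:
+//	    // handle err
+//	}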
diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go
new file mode 100644
index 000000000..dc942d9d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ErrorResponse Represents an error.
+// swagger:model ErrorResponse
+type ErrorResponse struct {
+
+ // The error message.
+ // Required: true
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 000000000..e292565b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,52 @@
+package events
+
+const (
+ // ContainerEventType is the event type that containers generate
+ ContainerEventType = "container"
+ // DaemonEventType is the event type that the daemon generates
+ DaemonEventType = "daemon"
+ // ImageEventType is the event type that images generate
+ ImageEventType = "image"
+ // NetworkEventType is the event type that networks generate
+ NetworkEventType = "network"
+ // PluginEventType is the event type that plugins generate
+ PluginEventType = "plugin"
+ // VolumeEventType is the event type that volumes generate
+ VolumeEventType = "volume"
+ // ServiceEventType is the event type that services generate
+ ServiceEventType = "service"
+ // NodeEventType is the event type that nodes generate
+ NodeEventType = "node"
+ // SecretEventType is the event type that secrets generate
+ SecretEventType = "secret"
+ // ConfigEventType is the event type that configs generate
+ ConfigEventType = "config"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// A container's attributes are its labels; other actors
+// can generate these attributes from other properties.
+type Actor struct {
+ ID string
+ Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+ // Deprecated information from JSONMessage.
+ // These fields carry data only in container events.
+ Status string `json:"status,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+
+ Type string
+ Action string
+ Actor Actor
+ // Engine events are local scope. Cluster events are swarm scope.
+ Scope string `json:"scope,omitempty"`
+
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
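+
+// Example (illustrative): a typical container start event, with the legacy
+// Status/ID/From fields populated alongside the newer ones.
+//
+//	msg := Message{
+//	    Status: "start", ID: "4386fb97867d", From: "ubuntu:16.04",
+//	    Type:   ContainerEventType,
+//	    Action: "start",
+//	    Actor:  Actor{ID: "4386fb97867d", Attributes: map[string]string{"image": "ubuntu:16.04"}},
+//	    Scope:  "local",
+//	}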
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 000000000..beec3d494
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,310 @@
+// Package filters provides helper functions to parse and handle command-line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
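+
+// Example (illustrative): building the structure described above by hand.
+//
+//	args := NewArgs()
+//	args.Add("label", "label1=1")
+//	args.Add("label", "label2=2")
+//	args.Add("image.name", "ubuntu")
+//	// args.fields is now
+//	// {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}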
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ buf, err := json.Marshal(a.fields)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ // for daemons older than v1.10, filter must be of the form map[string][]string
+ var buf []byte
+ var err error
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err = json.Marshal(convertArgsToSlice(a.fields))
+ } else {
+ buf, err = json.Marshal(a.fields)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+ if len(p) == 0 {
+ return NewArgs(), nil
+ }
+
+ r := strings.NewReader(p)
+ d := json.NewDecoder(r)
+
+ m := map[string]map[string]bool{}
+ if err := d.Decode(&m); err != nil {
+ r.Seek(0, 0)
+
+ // Allow parsing old arguments in slice format.
+ // Because other libraries might be sending them in this format.
+ deprecated := map[string][]string{}
+ if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+ m = deprecatedArgs(deprecated)
+ } else {
+ return NewArgs(), err
+ }
+ }
+ return Args{m}, nil
+}
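+
+// Example (illustrative): round-tripping Args through the wire format.
+//
+//	args := NewArgs()
+//	args.Add("status", "running")
+//	s, _ := ToParam(args)  // `{"status":{"running":true}}`
+//	back, _ := FromParam(s)
+//	back.Get("status")     // []string{"running"}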
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+ values := filters.fields[field]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ filters.fields[name][value] = true
+ } else {
+ filters.fields[name] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ delete(filters.fields[name], value)
+ if len(filters.fields[name]) == 0 {
+ delete(filters.fields, name)
+ }
+ }
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+ return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name': {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '2'},
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ // do not filter if there is no filter set or the filter cannot be determined
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field match the source string.
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu',
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
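+
+// Example (illustrative): Match falls back to regular-expression matching
+// when there is no exact match, so anchors can be used in filter values.
+//
+//	args := NewArgs()
+//	args.Add("name", "^web")
+//	args.Match("name", "webapp")   // true: regexp prefix match
+//	args.Match("name", "mywebapp") // false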
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+ // do not filter if there is no filter set or the filter cannot be determined
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+ // do not filter if there is no filter set or the filter cannot be determined
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return fmt.Errorf("Invalid filter '%s'", name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration if it finds an error and it returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := filters.fields[field]; !ok {
+ return nil
+ }
+ for v := range filters.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
new file mode 100644
index 000000000..4d9bf1c62
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
@@ -0,0 +1,17 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// GraphDriverData Information about a container's graph driver.
+// swagger:model GraphDriverData
+type GraphDriverData struct {
+
+ // data
+ // Required: true
+ Data map[string]string `json:"Data"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go
new file mode 100644
index 000000000..7592d2f8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/id_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IDResponse Response to an API call that returns just an Id
+// swagger:model IdResponse
+type IDResponse struct {
+
+ // The id of the newly created object.
+ // Required: true
+ ID string `json:"Id"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go
new file mode 100644
index 000000000..0dd30c729
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image/image_history.go
@@ -0,0 +1,37 @@
+package image
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// HistoryResponseItem history response item
+// swagger:model HistoryResponseItem
+type HistoryResponseItem struct {
+
+ // comment
+ // Required: true
+ Comment string `json:"Comment"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // created by
+ // Required: true
+ CreatedBy string `json:"CreatedBy"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // tags
+ // Required: true
+ Tags []string `json:"Tags"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
new file mode 100644
index 000000000..b9a65a0d8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
@@ -0,0 +1,15 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageDeleteResponseItem image delete response item
+// swagger:model ImageDeleteResponseItem
+type ImageDeleteResponseItem struct {
+
+ // The image ID of an image that was deleted
+ Deleted string `json:"Deleted,omitempty"`
+
+ // The image ID of an image that was untagged
+ Untagged string `json:"Untagged,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go
new file mode 100644
index 000000000..e145b3dcf
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image_summary.go
@@ -0,0 +1,49 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageSummary image summary
+// swagger:model ImageSummary
+type ImageSummary struct {
+
+ // containers
+ // Required: true
+ Containers int64 `json:"Containers"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // labels
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // parent Id
+ // Required: true
+ ParentID string `json:"ParentId"`
+
+ // repo digests
+ // Required: true
+ RepoDigests []string `json:"RepoDigests"`
+
+ // repo tags
+ // Required: true
+ RepoTags []string `json:"RepoTags"`
+
+ // shared size
+ // Required: true
+ SharedSize int64 `json:"SharedSize"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // virtual size
+ // Required: true
+ VirtualSize int64 `json:"VirtualSize"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 000000000..2744f85d6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,128 @@
+package mount
+
+import (
+ "os"
+)
+
+// Type represents the type of a mount.
+type Type string
+
+// Type constants
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind Type = "bind"
+ // TypeVolume is the type for remote storage volumes
+ TypeVolume Type = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs Type = "tmpfs"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type Type `json:",omitempty"`
+ // Source specifies the name of the mount. Depending on mount type, this
+ // may be a volume name or a host path, or even ignored.
+ // Source is not supported for tmpfs (must be an empty value)
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Consistency Consistency `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+ TmpfsOptions *TmpfsOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+ // PropagationRPrivate RPRIVATE
+ PropagationRPrivate Propagation = "rprivate"
+ // PropagationPrivate PRIVATE
+ PropagationPrivate Propagation = "private"
+ // PropagationRShared RSHARED
+ PropagationRShared Propagation = "rshared"
+ // PropagationShared SHARED
+ PropagationShared Propagation = "shared"
+ // PropagationRSlave RSLAVE
+ PropagationRSlave Propagation = "rslave"
+ // PropagationSlave SLAVE
+ PropagationSlave Propagation = "slave"
+)
+
+// Propagations is the list of all valid mount propagations
+var Propagations = []Propagation{
+ PropagationRPrivate,
+ PropagationPrivate,
+ PropagationRShared,
+ PropagationShared,
+ PropagationRSlave,
+ PropagationSlave,
+}
+
+// Consistency represents the consistency requirements of a mount.
+type Consistency string
+
+const (
+ // ConsistencyFull guarantees bind-mount-like consistency
+ ConsistencyFull Consistency = "consistent"
+ // ConsistencyCached mounts can cache read data and FS structure
+ ConsistencyCached Consistency = "cached"
+ // ConsistencyDelegated mounts can cache read and written data and structure
+ ConsistencyDelegated Consistency = "delegated"
+ // ConsistencyDefault provides "consistent" behavior unless overridden
+ ConsistencyDefault Consistency = "default"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation Propagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
+
+// Driver represents a volume driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TmpfsOptions defines options specific to mounts of type "tmpfs".
+type TmpfsOptions struct {
+ // Size sets the size of the tmpfs, in bytes.
+ //
+ // This will be converted to an operating-system-specific value
+ // depending on the host. For example, on Linux, it will be converted to
+ // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+ // docker, uses a straight byte value.
+ //
+ // Percentages are not supported.
+ SizeBytes int64 `json:",omitempty"`
+ // Mode of the tmpfs upon creation
+ Mode os.FileMode `json:",omitempty"`
+
+ // TODO(stevvooe): There are several more tmpfs flags, specified in the
+ // daemon, that are accepted. Only the most basic are added for now.
+ //
+ // From docker/docker/pkg/mount/flags.go:
+ //
+ // var validFlags = map[string]bool{
+ // "": true,
+ // "size": true, X
+ // "mode": true, X
+ // "uid": true,
+ // "gid": true,
+ // "nr_inodes": true,
+ // "nr_blocks": true,
+ // "mpol": true,
+ // }
+ //
+ // Some of these may be straightforward to add, but others, such as
+ // uid/gid have implications in a clustered system.
+}
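+
+// Illustrative usage (a sketch, not part of the upstream file; the target
+// path and size are example values): a 64 MiB tmpfs mount. Source stays
+// empty, as required for the tmpfs type:
+//
+//     m := mount.Mount{
+//         Type:   mount.TypeTmpfs,
+//         Target: "/app/cache",
+//         TmpfsOptions: &mount.TmpfsOptions{
+//             SizeBytes: 64 * 1024 * 1024,
+//             Mode:      0700,
+//         },
+//     }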
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go
new file mode 100644
index 000000000..7c7dbacc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/network/network.go
@@ -0,0 +1,108 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+ Driver string
+ Options map[string]string // Per-network IPAM driver options
+ Config []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
+}
+
+// Copy makes a copy of the endpoint IPAM config
+func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
+ cfgCopy := *cfg
+ cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
+ cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
+ return &cfgCopy
+}
+
+// PeerInfo represents one peer of an overlay network
+type PeerInfo struct {
+ Name string
+ IP string
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+ // Configurations
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string
+ // Operational data
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ MacAddress string
+ DriverOpts map[string]string
+}
+
+// Task carries the information about one backend task
+type Task struct {
+ Name string
+ EndpointID string
+ EndpointIP string
+ Info map[string]string
+}
+
+// ServiceInfo represents service parameters with the list of service's tasks
+type ServiceInfo struct {
+ VIP string
+ Ports []string
+ LocalLBIndex int
+ Tasks []Task
+}
+
+// Copy makes a deep copy of `EndpointSettings`
+func (es *EndpointSettings) Copy() *EndpointSettings {
+ epCopy := *es
+ if es.IPAMConfig != nil {
+ epCopy.IPAMConfig = es.IPAMConfig.Copy()
+ }
+
+ if es.Links != nil {
+ links := make([]string, 0, len(es.Links))
+ epCopy.Links = append(links, es.Links...)
+ }
+
+ if es.Aliases != nil {
+ aliases := make([]string, 0, len(es.Aliases))
+ epCopy.Aliases = append(aliases, es.Aliases...)
+ }
+ return &epCopy
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
+
+// ConfigReference specifies the source which provides a network's configuration
+type ConfigReference struct {
+ Network string
+}
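+
+// Illustrative usage (a sketch, not part of the upstream file): Copy returns
+// an independent EndpointSettings, so mutating the copy's slices leaves the
+// original untouched:
+//
+//     orig := &EndpointSettings{Aliases: []string{"db"}}
+//     cp := orig.Copy()
+//     cp.Aliases = append(cp.Aliases, "db-replica") // orig.Aliases is still ["db"]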
diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go
new file mode 100644
index 000000000..cab333e01
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin.go
@@ -0,0 +1,200 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Plugin A plugin for the Engine API
+// swagger:model Plugin
+type Plugin struct {
+
+ // config
+ // Required: true
+ Config PluginConfig `json:"Config"`
+
+ // True if the plugin is running. False if the plugin is installed but not running.
+ // Required: true
+ Enabled bool `json:"Enabled"`
+
+ // Id
+ ID string `json:"Id,omitempty"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // plugin remote reference used to push/pull the plugin
+ PluginReference string `json:"PluginReference,omitempty"`
+
+ // settings
+ // Required: true
+ Settings PluginSettings `json:"Settings"`
+}
+
+// PluginConfig The config of a plugin.
+// swagger:model PluginConfig
+type PluginConfig struct {
+
+ // args
+ // Required: true
+ Args PluginConfigArgs `json:"Args"`
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // Docker Version used to create the plugin
+ DockerVersion string `json:"DockerVersion,omitempty"`
+
+ // documentation
+ // Required: true
+ Documentation string `json:"Documentation"`
+
+ // entrypoint
+ // Required: true
+ Entrypoint []string `json:"Entrypoint"`
+
+ // env
+ // Required: true
+ Env []PluginEnv `json:"Env"`
+
+ // interface
+ // Required: true
+ Interface PluginConfigInterface `json:"Interface"`
+
+ // ipc host
+ // Required: true
+ IpcHost bool `json:"IpcHost"`
+
+ // linux
+ // Required: true
+ Linux PluginConfigLinux `json:"Linux"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+
+ // network
+ // Required: true
+ Network PluginConfigNetwork `json:"Network"`
+
+ // pid host
+ // Required: true
+ PidHost bool `json:"PidHost"`
+
+ // propagated mount
+ // Required: true
+ PropagatedMount string `json:"PropagatedMount"`
+
+ // user
+ User PluginConfigUser `json:"User,omitempty"`
+
+ // work dir
+ // Required: true
+ WorkDir string `json:"WorkDir"`
+
+ // rootfs
+ Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
+}
+
+// PluginConfigArgs plugin config args
+// swagger:model PluginConfigArgs
+type PluginConfigArgs struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value []string `json:"Value"`
+}
+
+// PluginConfigInterface The interface between Docker and the plugin
+// swagger:model PluginConfigInterface
+type PluginConfigInterface struct {
+
+ // socket
+ // Required: true
+ Socket string `json:"Socket"`
+
+ // types
+ // Required: true
+ Types []PluginInterfaceType `json:"Types"`
+}
+
+// PluginConfigLinux plugin config linux
+// swagger:model PluginConfigLinux
+type PluginConfigLinux struct {
+
+ // allow all devices
+ // Required: true
+ AllowAllDevices bool `json:"AllowAllDevices"`
+
+ // capabilities
+ // Required: true
+ Capabilities []string `json:"Capabilities"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+}
+
+// PluginConfigNetwork plugin config network
+// swagger:model PluginConfigNetwork
+type PluginConfigNetwork struct {
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
+
+// PluginConfigRootfs plugin config rootfs
+// swagger:model PluginConfigRootfs
+type PluginConfigRootfs struct {
+
+ // diff ids
+ DiffIds []string `json:"diff_ids"`
+
+ // type
+ Type string `json:"type,omitempty"`
+}
+
+// PluginConfigUser plugin config user
+// swagger:model PluginConfigUser
+type PluginConfigUser struct {
+
+ // g ID
+ GID uint32 `json:"GID,omitempty"`
+
+ // UID
+ UID uint32 `json:"UID,omitempty"`
+}
+
+// PluginSettings Settings that can be modified by users.
+// swagger:model PluginSettings
+type PluginSettings struct {
+
+ // args
+ // Required: true
+ Args []string `json:"Args"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+
+ // env
+ // Required: true
+ Env []string `json:"Env"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go
new file mode 100644
index 000000000..569901067
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_device.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginDevice plugin device
+// swagger:model PluginDevice
+type PluginDevice struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // path
+ // Required: true
+ Path *string `json:"Path"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go
new file mode 100644
index 000000000..32962dc2e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_env.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginEnv plugin env
+// swagger:model PluginEnv
+type PluginEnv struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value *string `json:"Value"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
new file mode 100644
index 000000000..c82f204e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
@@ -0,0 +1,21 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginInterfaceType plugin interface type
+// swagger:model PluginInterfaceType
+type PluginInterfaceType struct {
+
+ // capability
+ // Required: true
+ Capability string `json:"Capability"`
+
+ // prefix
+ // Required: true
+ Prefix string `json:"Prefix"`
+
+ // version
+ // Required: true
+ Version string `json:"Version"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go
new file mode 100644
index 000000000..5c031cf8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go
@@ -0,0 +1,37 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginMount plugin mount
+// swagger:model PluginMount
+type PluginMount struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // destination
+ // Required: true
+ Destination string `json:"Destination"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // options
+ // Required: true
+ Options []string `json:"Options"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // source
+ // Required: true
+ Source *string `json:"Source"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go
new file mode 100644
index 000000000..18f743fcd
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go
@@ -0,0 +1,71 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// PluginsListResponse contains the response for the Engine API
+type PluginsListResponse []*Plugin
+
+// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
+func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
+ versionIndex := len(p)
+ prefixIndex := 0
+ if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
+ return fmt.Errorf("%q is not a plugin interface type", p)
+ }
+ p = p[1 : len(p)-1]
+loop:
+ for i, b := range p {
+ switch b {
+ case '.':
+ prefixIndex = i
+ case '/':
+ versionIndex = i
+ break loop
+ }
+ }
+ t.Prefix = string(p[:prefixIndex])
+ t.Capability = string(p[prefixIndex+1 : versionIndex])
+ if versionIndex < len(p) {
+ t.Version = string(p[versionIndex+1:])
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler for PluginInterfaceType
+func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String implements fmt.Stringer for PluginInterfaceType
+func (t PluginInterfaceType) String() string {
+ return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
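+
+// Illustrative round trip (a sketch, not part of the upstream file): the wire
+// format is "prefix.capability/version", so for example:
+//
+//     var t PluginInterfaceType
+//     _ = t.UnmarshalJSON([]byte(`"docker.volumedriver/1.0"`))
+//     // t.Prefix == "docker", t.Capability == "volumedriver", t.Version == "1.0"
+//     s := t.String() // "docker.volumedriver/1.0"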
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string
+ Description string
+ Value []string
+}
+
+// PluginPrivileges is a list of PluginPrivilege
+type PluginPrivileges []PluginPrivilege
+
+func (s PluginPrivileges) Len() int {
+ return len(s)
+}
+
+func (s PluginPrivileges) Less(i, j int) bool {
+ return s[i].Name < s[j].Name
+}
+
+func (s PluginPrivileges) Swap(i, j int) {
+ sort.Strings(s[i].Value)
+ sort.Strings(s[j].Value)
+ s[i], s[j] = s[j], s[i]
+}
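+
+// Illustrative usage (a sketch, not part of the upstream file): the
+// Len/Less/Swap methods implement sort.Interface, ordering privileges by
+// name; note that Swap also sorts each privilege's Value list as a side
+// effect, so the whole structure ends up in a canonical order:
+//
+//     sort.Sort(privileges)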
diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go
new file mode 100644
index 000000000..ad52d46d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/port.go
@@ -0,0 +1,23 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Port An open port on a container
+// swagger:model Port
+type Port struct {
+
+ // IP
+ IP string `json:"IP,omitempty"`
+
+ // Port on the container
+ // Required: true
+ PrivatePort uint16 `json:"PrivatePort"`
+
+ // Port exposed on the host
+ PublicPort uint16 `json:"PublicPort,omitempty"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
new file mode 100644
index 000000000..42cac4430
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
@@ -0,0 +1,21 @@
+package registry
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// AuthenticateOKBody authenticate o k body
+// swagger:model AuthenticateOKBody
+type AuthenticateOKBody struct {
+
+ // An opaque token used to authenticate a user after a successful login
+ // Required: true
+ IdentityToken string `json:"IdentityToken"`
+
+ // The status of the authentication
+ // Required: true
+ Status string `json:"Status"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
new file mode 100644
index 000000000..b98a943a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -0,0 +1,119 @@
+package registry
+
+import (
+ "encoding/json"
+ "net"
+
+ "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+ AllowNondistributableArtifactsCIDRs []*NetIPNet
+ AllowNondistributableArtifactsHostnames []string
+ InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
+ IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled to and
+// unmarshalled from JSON
+type NetIPNet net.IPNet
+
+// String returns the CIDR notation of ipnet
+func (ipnet *NetIPNet) String() string {
+ return (*net.IPNet)(ipnet).String()
+}
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
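+
+// Illustrative round trip (a sketch, not part of the upstream file):
+//
+//     var n NetIPNet
+//     _ = n.UnmarshalJSON([]byte(`"10.0.0.0/8"`))
+//     b, _ := n.MarshalJSON() // string(b) == `"10.0.0.0/8"`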
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+// "Index" : {
+// "Name" : "docker.io",
+// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+// "Secure" : true,
+// "Official" : true,
+// },
+// "RemoteName" : "library/debian",
+// "LocalName" : "debian",
+// "CanonicalName" : "docker.io/debian"
+// "Official" : true,
+// }
+//
+// {
+// "Index" : {
+// "Name" : "127.0.0.1:5000",
+// "Mirrors" : [],
+// "Secure" : false,
+// "Official" : false,
+// },
+// "RemoteName" : "user/repo",
+// "LocalName" : "127.0.0.1:5000/user/repo",
+// "CanonicalName" : "127.0.0.1:5000/user/repo",
+// "Official" : false,
+// }
+type IndexInfo struct {
+ // Name is the name of the registry, such as "docker.io"
+ Name string
+ // Mirrors is a list of mirrors, expressed as URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go
new file mode 100644
index 000000000..7d62c9a43
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/seccomp.go
@@ -0,0 +1,93 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls.
+// By default only the native architecture of the kernel is permitted.
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Filter is used to conditionally apply Seccomp rules
+type Filter struct {
+ Caps []string `json:"caps,omitempty"`
+ Arches []string `json:"arches,omitempty"`
+}
+
+// Syscall is used to match a group of syscalls in Seccomp
+type Syscall struct {
+ Name string `json:"name,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+ Comment string `json:"comment"`
+ Includes Filter `json:"includes"`
+ Excludes Filter `json:"excludes"`
+}
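+
+// Illustrative sketch (not part of the upstream file): a minimal profile that
+// returns an error for every syscall except an explicitly allowed group:
+//
+//     profile := Seccomp{
+//         DefaultAction: ActErrno,
+//         Syscalls: []*Syscall{
+//             {Names: []string{"read", "write", "exit_group"}, Action: ActAllow},
+//         },
+//     }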
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 000000000..74ea64b1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 000000000..7ca76a5b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and responses
+// returned to consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry stores a single entry of the Blkio stats.
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux-specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that a combined
+// structure would make little sense.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container's name and ID, plus its per-network stats
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
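+
+// Illustrative sketch (not part of the upstream file): consumers commonly
+// derive a Linux CPU percentage from the current and previous samples, along
+// the lines of what the docker CLI does:
+//
+//     cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
+//     sysDelta := float64(s.CPUStats.SystemUsage - s.PreCPUStats.SystemUsage)
+//     if cpuDelta > 0 && sysDelta > 0 {
+//         percent = (cpuDelta / sysDelta) * float64(s.CPUStats.OnlineCPUs) * 100.0
+//     }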
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 000000000..bad493fb8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
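+
+// Illustrative usage (a sketch, not part of the upstream file): both of the
+// following inputs decode into a StrSlice, since a bare string is wrapped in
+// a one-element slice:
+//
+//     var a, b StrSlice
+//     _ = json.Unmarshal([]byte(`["sh", "-c", "echo hi"]`), &a) // ["sh" "-c" "echo hi"]
+//     _ = json.Unmarshal([]byte(`"echo hi"`), &b)               // ["echo hi"]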
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 000000000..2834cf202
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about which CA certificate is trusted,
+// and who the issuer of a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuer is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 000000000..0fb021ce9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget
+ ConfigID string
+ ConfigName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 000000000..6f8b45f6b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf).
+// Detailed documentation is available at:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+}
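+
+// Illustrative sketch (not part of the upstream file; image and DNS values
+// are examples): a spec that pins resolver behaviour for the task's
+// containers:
+//
+//     spec := ContainerSpec{
+//         Image: "nginx:alpine",
+//         DNSConfig: &DNSConfig{
+//             Nameservers: []string{"10.0.0.2"},
+//             Search:      []string{"example.internal"},
+//             Options:     []string{"ndots:2"},
+//         },
+//     }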
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 000000000..97c484e14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+ // PublishMode is the mode in which the port is published
+ PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+ // PortConfigPublishModeIngress is used for ports published
+ // for ingress load balancing using routing mesh.
+ PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+ // PortConfigPublishModeHost is used for ports published
+ // for direct host level access on the host where the task is running.
+ PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual IP of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ Attachable bool `json:",omitempty"`
+ Ingress bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+ ConfigFrom *network.ConfigReference `json:",omitempty"`
+ Scope string `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+ DriverOpts map[string]string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents IPAM options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents IPAM configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
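+
+// Illustrative sketch (not part of the upstream file): publishing container
+// port 80 as port 8080 through the ingress routing mesh:
+//
+//     ep := EndpointSpec{
+//         Mode: ResolutionModeVIP,
+//         Ports: []PortConfig{{
+//             Protocol:      PortConfigProtocolTCP,
+//             TargetPort:    80,
+//             PublishedPort: 8080,
+//             PublishMode:   PortConfigPublishModeIngress,
+//         }},
+//     }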
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
new file mode 100644
index 000000000..28c6851e9
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -0,0 +1,115 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `json:",omitempty"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description NodeDescription `json:",omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `json:",omitempty"`
+ // ManagerStatus provides the current status of the node's manager
+ // component, if the node is a manager.
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+ TLSInfo TLSInfo `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
new file mode 100644
index 000000000..c4c731dc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
@@ -0,0 +1,19 @@
+package swarm
+
+// RuntimeType is the type of runtime used for the TaskSpec
+type RuntimeType string
+
+// RuntimeURL is the proto type URL
+type RuntimeURL string
+
+const (
+ // RuntimeContainer is the container based runtime
+ RuntimeContainer RuntimeType = "container"
+ // RuntimePlugin is the plugin based runtime
+ RuntimePlugin RuntimeType = "plugin"
+
+ // RuntimeURLContainer is the proto url for the container type
+ RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
+ // RuntimeURLPlugin is the proto url for the plugin type
+ RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
+)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
new file mode 100644
index 000000000..47ae234ef
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
+
+package runtime
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
new file mode 100644
index 000000000..1fdc9b043
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
@@ -0,0 +1,712 @@
+// Code generated by protoc-gen-gogo.
+// source: plugin.proto
+// DO NOT EDIT!
+
+/*
+ Package runtime is a generated protocol buffer package.
+
+ It is generated from these files:
+ plugin.proto
+
+ It has these top-level messages:
+ PluginSpec
+ PluginPrivilege
+*/
+package runtime
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+type PluginSpec struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
+ Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
+ Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (m *PluginSpec) Reset() { *m = PluginSpec{} }
+func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
+func (*PluginSpec) ProtoMessage() {}
+func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
+
+func (m *PluginSpec) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetRemote() string {
+ if m != nil {
+ return m.Remote
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
+ if m != nil {
+ return m.Privileges
+ }
+ return nil
+}
+
+func (m *PluginSpec) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
+}
+
+func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
+func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
+func (*PluginPrivilege) ProtoMessage() {}
+func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
+
+func (m *PluginPrivilege) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
+ proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
+}
+func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Remote) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
+ i += copy(dAtA[i:], m.Remote)
+ }
+ if len(m.Privileges) > 0 {
+ for _, msg := range m.Privileges {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Disabled {
+ dAtA[i] = 0x20
+ i++
+ if m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Description) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
+ i += copy(dAtA[i:], m.Description)
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ dAtA[offset+4] = uint8(v >> 32)
+ dAtA[offset+5] = uint8(v >> 40)
+ dAtA[offset+6] = uint8(v >> 48)
+ dAtA[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *PluginSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Remote)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Privileges) > 0 {
+ for _, e := range m.Privileges {
+ l = e.Size()
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ if m.Disabled {
+ n += 2
+ }
+ return n
+}
+
+func (m *PluginPrivilege) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovPlugin(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozPlugin(x uint64) (n int) {
+ return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PluginSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Remote = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Privileges = append(m.Privileges, &PluginPrivilege{})
+ if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPlugin(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ iNdEx += length
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipPlugin(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
+
+var fileDescriptorPlugin = []byte{
+ // 196 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
+ 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
+ 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
+ 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
+ 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
+ 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
+ 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
+ 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
+ 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
+ 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
+ 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+ 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
+ 0x0c, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
new file mode 100644
index 000000000..06eb7ba65
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+message PluginSpec {
+ string name = 1;
+ string remote = 2;
+ repeated PluginPrivilege privileges = 3;
+ bool disabled = 4;
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+message PluginPrivilege {
+ string name = 1;
+ string description = 2;
+ repeated string value = 3;
+}
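
Taken together with the generated Go above, these messages round-trip through the protobuf wire format. A sketch, assuming the generated Marshal counterpart of the Unmarshal shown earlier (standard for gogo/protobuf output); the plugin name, remote, and privilege values are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	spec := runtime.PluginSpec{
		Name:   "example-plugin",            // illustrative values
		Remote: "example.com/plugin:latest",
		Privileges: []*runtime.PluginPrivilege{{
			Name:        "network",
			Description: "permission to use host networking",
			Value:       []string{"host"},
		}},
	}

	data, err := spec.Marshal() // generated marshaler, assumed from gogo/protobuf
	if err != nil {
		panic(err)
	}

	var decoded runtime.PluginSpec
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Name, len(decoded.Privileges))
}
```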
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go
new file mode 100644
index 000000000..f9b1e9266
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go
@@ -0,0 +1,32 @@
+package swarm
+
+import "os"
+
+// Secret represents a secret.
+type Secret struct {
+ ID string
+ Meta
+ Spec SecretSpec
+}
+
+// SecretSpec represents the spec of a secret in swarm
+type SecretSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+ Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+ File *SecretReferenceFileTarget
+ SecretID string
+ SecretName string
+}
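
A short sketch of how these types compose when referencing a secret from a task; the IDs and mode are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	ref := swarm.SecretReference{
		File: &swarm.SecretReferenceFileTarget{
			Name: "db_password", // filename the secret is surfaced as inside the task
			UID:  "0",
			GID:  "0",
			Mode: 0444, // read-only for everyone
		},
		SecretID:   "illustrative-secret-id",
		SecretName: "db_password",
	}
	fmt.Println(ref.SecretName, ref.File.Mode)
}
```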
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 000000000..fa31a7ec8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,124 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus *UpdateStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ RollbackConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks field in ServiceSpec is deprecated. The
+ // same field in TaskSpec should be used instead.
+ // This field will be removed in a future release.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback paused.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback completed.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
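
As a sketch of how the update knobs fit together, a replicated service that rolls out one task at a time and rolls back once a fifth of the new tasks fail could be specified like this (the concrete numbers are illustrative):

```go
package main

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     1,                // update one task at a time
			Delay:           10 * time.Second, // wait between batches
			FailureAction:   swarm.UpdateFailureActionRollback,
			Monitor:         30 * time.Second, // failures within this window count
			MaxFailureRatio: 0.2,              // roll back once 20% of tasks fail
			Order:           swarm.UpdateOrderStartFirst,
		},
	}
	_ = spec // pass to a service create/update call
}
```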
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
new file mode 100644
index 000000000..b65fa86da
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -0,0 +1,217 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info".
+// It contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often the agent should send heartbeats to
+ // the dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+ JoinToken string // the join token, treated as a secret
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ RotateManagerUnlockKey bool
+}
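
For illustration, a swarm init request with auto-lock enabled and explicit raft tuning could combine these types as follows; the addresses and tick values are made up, and note the documented requirement that ElectionTick exceed HeartbeatTick:

```go
package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",      // illustrative addresses
		AdvertiseAddr:    "192.168.1.10:2377",
		AutoLockManagers: true, // manager keys must be unlocked after restart
	}
	req.Spec.Raft = swarm.RaftConfig{
		SnapshotInterval: 10000,
		ElectionTick:     10, // must be greater than HeartbeatTick
		HeartbeatTick:    1,
	}
	_ = req // pass to a swarm init call
}
```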
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 000000000..ff11b07e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,184 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/swarm/runtime"
+)
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ // ContainerSpec and PluginSpec are mutually exclusive.
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin`
+ ContainerSpec *ContainerSpec `json:",omitempty"`
+ PluginSpec *runtime.PluginSpec `json:",omitempty"`
+
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the cluster default on swarm.Spec will be
+ // used, finally falling back to the engine default if not specified.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+
+ Runtime RuntimeType `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
+type GenericResource struct {
+ NamedResourceSpec *NamedGenericResource `json:",omitempty"`
+ DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+ Preferences []PlacementPreference `json:",omitempty"`
+
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
+
+// PortStatus represents the status of a task's host ports, for tasks
+// whose service has published host ports.
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
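
A sketch of a TaskSpec that restarts failed tasks a bounded number of times and spreads tasks across a node label; the constraint and label strings are illustrative:

```go
package main

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	maxAttempts := uint64(3)
	spec := swarm.TaskSpec{
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &delay,
			MaxAttempts: &maxAttempts,
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role == worker"}, // illustrative constraint
			Preferences: []swarm.PlacementPreference{
				{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
			},
		},
	}
	_ = spec // embedded in a ServiceSpec as TaskTemplate
}
```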
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
new file mode 100644
index 000000000..63e1eec19
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+ return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
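
Since this package shadows the standard library's time package name, callers typically alias the import. A usage sketch:

```go
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	fmt.Println(apitime.DurationToSecondsString(90 * time.Second)) // "90"
	fmt.Println(apitime.DurationToSecondsString(2 * time.Minute))  // "120"
}
```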
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 000000000..9aa9702da
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a golang duration,
+// then as an RFC3339 time, and finally as a Unix timestamp. If
+// any of these succeed, it returns a Unix timestamp as a string;
+// otherwise it returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ var parseInLocation bool
+
+ // if the string has a Z or a + or three dashes, use time.Parse; otherwise use time.ParseInLocation
+ parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z) then
+ // there will be an extra colon in the input for the tz offset subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//     seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//     if err == nil { since := time.Unix(seconds, nanoseconds) }
+// If value == "", it returns def as the seconds value.
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
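
A usage sketch covering both helpers: a relative duration resolves against the reference time, and a "%d.%09d" string splits back into the pieces needed by time.Unix:

```go
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	// "10m" is parsed as a duration: ten minutes before the reference time.
	ts, err := apitime.GetTimestamp("10m", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(ts) // Unix seconds, as a string

	// Split a "%d.%09d" timestamp back into seconds and nanoseconds.
	secs, nanos, err := apitime.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(secs, nanos).UTC()) // 2006-01-02 00:00:00.000000001 +0000 UTC
}
```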
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 000000000..f7ac77297
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,575 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/go-connections/nat"
+)
+
+// RootFS returns Image's RootFS description including the layer IDs.
+type RootFS struct {
+ Type string
+ Layers []string `json:",omitempty"`
+ BaseLayer string `json:",omitempty"`
+}
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+ ID string `json:"Id"`
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Comment string
+ Created string
+ Container string
+ ContainerConfig *container.Config
+ DockerVersion string
+ Author string
+ Config *container.Config
+ Architecture string
+ Os string
+ OsVersion string `json:",omitempty"`
+ Size int64
+ VirtualSize int64
+ GraphDriver GraphDriverData
+ RootFS RootFS
+ Metadata ImageMetadata
+}
+
+// ImageMetadata contains engine-local data about the image
+type ImageMetadata struct {
+ LastTagTime time.Time `json:",omitempty"`
+}
+
+// Container contains response of Engine API:
+// GET "/containers/json"
+type Container struct {
+ ID string `json:"Id"`
+ Names []string
+ Image string
+ ImageID string
+ Command string
+ Created int64
+ Ports []Port
+ SizeRw int64 `json:",omitempty"`
+ SizeRootFs int64 `json:",omitempty"`
+ Labels map[string]string
+ State string
+ Status string
+ HostConfig struct {
+ NetworkMode string `json:",omitempty"`
+ }
+ NetworkSettings *SummaryNetworkSettings
+ Mounts []MountPoint
+}
+
+// CopyConfig contains request body of Engine API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+ Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type ContainerPathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// ContainerStats contains response of Engine API:
+// GET "/stats"
+type ContainerStats struct {
+ Body io.ReadCloser `json:"body"`
+ OSType string `json:"ostype"`
+}
+
+// Ping contains response of Engine API:
+// GET "/_ping"
+type Ping struct {
+ APIVersion string
+ OSType string
+ Experimental bool
+}
+
+// Version contains response of Engine API:
+// GET "/version"
+type Version struct {
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ MinAPIVersion string `json:"MinAPIVersion,omitempty"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd, or runC.
+type Commit struct {
+ ID string // ID is the actual commit ID of external tool.
+ Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
+}
+
+// Info contains response of Engine API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ GenericResources []swarm.GenericResource
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+ Runtimes map[string]Runtime
+ DefaultRuntime string
+ Swarm swarm.Info
+ // LiveRestoreEnabled determines whether containers should be kept
+ // running when the daemon is shutdown or upon daemon start if
+ // running containers are detected
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+}
+
+// KeyValue holds a key/value pair
+type KeyValue struct {
+ Key, Value string
+}
+
+// SecurityOpt contains the name and options of a security option
+type SecurityOpt struct {
+ Name string
+ Options []KeyValue
+}
+
+// DecodeSecurityOptions decodes a security options string slice to a type safe
+// SecurityOpt
+func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
+ so := []SecurityOpt{}
+ for _, opt := range opts {
+ // support output from a < 1.13 docker daemon
+ if !strings.Contains(opt, "=") {
+ so = append(so, SecurityOpt{Name: opt})
+ continue
+ }
+ secopt := SecurityOpt{}
+ split := strings.Split(opt, ",")
+ for _, s := range split {
+ kv := strings.SplitN(s, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("invalid security option %q", s)
+ }
+ if kv[0] == "" || kv[1] == "" {
+ return nil, errors.New("invalid empty security option")
+ }
+ if kv[0] == "name" {
+ secopt.Name = kv[1]
+ continue
+ }
+ secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
+ }
+ so = append(so, secopt)
+ }
+ return so, nil
+}
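
A usage sketch showing both encodings the decoder accepts: the comma-separated key=value form and the bare-name form emitted by pre-1.13 daemons:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	opts := []string{
		"name=seccomp,profile=default", // modern form: name plus options
		"apparmor",                     // legacy (< 1.13) form: bare name
	}
	decoded, err := types.DecodeSecurityOptions(opts)
	if err != nil {
		panic(err)
	}
	for _, o := range decoded {
		fmt.Println(o.Name, o.Options)
	}
	// seccomp [{profile default}]
	// apparmor []
}
```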
+
+// PluginsInfo is a temp struct holding the names of the plugins
+// registered with the docker daemon. It is used by the Info struct.
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+ // List of Log plugins registered
+ Log []string
+}
+
+// ExecStartCheck is a temp struct used by execStart.
+// Its Config fields are part of ExecConfig in the runconfig package.
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ NoHealthcheck = "none" // Indicates there is no healthcheck
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// ContainerState stores the container's running state.
+// It's part of ContainerJSONBase and will be returned by the "inspect" command.
+type ContainerState struct {
+ Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only available in Docker Swarm
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Engine API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"`
+ Name string
+ RestartCount int
+ Driver string
+ Platform string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is the newly used struct along with MountPoint.
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+ SandboxKey string // SandboxKey identifies the sandbox
+ SecondaryIPAddresses []network.Address
+ SecondaryIPv6Addresses []network.Address
+}
+
+// DefaultNetworkSettings holds network information
+// during the two-release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+ Type mount.Type `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Source string
+ Destination string
+ Driver string `json:",omitempty"`
+ Mode string
+ RW bool
+ Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network was created
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents whether the network is internal only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network specific options to use for when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+ Services map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ // Check for networks with duplicate names.
+ // Network is primarily keyed based on a random ID and not on the name.
+ // Network name is strictly a user-friendly alias to the network
+ // which is uniquely identified using ID.
+ // And there is no guaranteed way to check for duplicates.
+ // Option CheckDuplicate is there to provide best-effort checking of any networks
+ // which have the same name, but it is not guaranteed to catch all name collisions.
+ CheckDuplicate bool
+ Driver string
+ Scope string
+ EnableIPv6 bool
+ IPAM *network.IPAM
+ Internal bool
+ Attachable bool
+ Ingress bool
+ ConfigOnly bool
+ ConfigFrom *network.ConfigReference
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for network create call.
+type NetworkCreateRequest struct {
+ NetworkCreate
+ Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for network create call
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+ Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+ Container string
+ EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+ Container string
+ Force bool
+}
+
+// NetworkInspectOptions holds parameters to inspect network
+type NetworkInspectOptions struct {
+ Scope string
+ Verbose bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+ Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ Path string `json:"path"`
+ Args []string `json:"runtimeArgs,omitempty"`
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+ LayersSize int64
+ Images []*ImageSummary
+ Containers []*Container
+ Volumes []*Volume
+ BuilderSize int64
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+ ContainersDeleted []string
+ SpaceReclaimed uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+ VolumesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+ ImagesDeleted []ImageDeleteResponseItem
+ SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+ SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+ NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+ Tag string
+ Digest string
+ Size int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+ ID string
+}
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 000000000..1ef911edb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you should create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 000000000..611d4fed6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+ "strconv"
+ "strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+ var (
+ currTab = strings.Split(v1, ".")
+ otherTab = strings.Split(v2, ".")
+ )
+
+ max := len(currTab)
+ if len(otherTab) > max {
+ max = len(otherTab)
+ }
+ for i := 0; i < max; i++ {
+ var currInt, otherInt int
+
+ if len(currTab) > i {
+ currInt, _ = strconv.Atoi(currTab[i])
+ }
+ if len(otherTab) > i {
+ otherInt, _ = strconv.Atoi(otherTab[i])
+ }
+ if currInt > otherInt {
+ return 1
+ }
+ if otherInt > currInt {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+ return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+ return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+ return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+ return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+ return compare(v, other) == 0
+}
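
A usage sketch; because missing components compare as zero, "1.24" and "1.24.0" are considered equal:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.12", "1.30"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.31", "1.31")) // true
	fmt.Println(versions.Equal("1.24", "1.24.0"))              // true: missing parts are zero
}
```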
diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go
new file mode 100644
index 000000000..b5ee96a50
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume.go
@@ -0,0 +1,69 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Volume volume
+// swagger:model Volume
+type Volume struct {
+
+ // Date/Time the volume was created.
+ CreatedAt string `json:"CreatedAt,omitempty"`
+
+ // Name of the volume driver used by the volume.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // Mount path of the volume on the host.
+ // Required: true
+ Mountpoint string `json:"Mountpoint"`
+
+ // Name of the volume.
+ // Required: true
+ Name string `json:"Name"`
+
+ // The driver specific options used when creating the volume.
+ // Required: true
+ Options map[string]string `json:"Options"`
+
+ // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
+ // Required: true
+ Scope string `json:"Scope"`
+
+ // Low-level details about the volume, provided by the volume driver.
+ // Details are returned as a map with key/value pairs:
+ // `{"key":"value","key2":"value2"}`.
+ //
+ // The `Status` field is optional, and is omitted if the volume driver
+ // does not support this feature.
+ //
+ Status map[string]interface{} `json:"Status,omitempty"`
+
+ // usage data
+ UsageData *VolumeUsageData `json:"UsageData,omitempty"`
+}
+
+// VolumeUsageData Usage details about the volume. This information is used by the
+// `GET /system/df` endpoint, and omitted in other endpoints.
+//
+// swagger:model VolumeUsageData
+type VolumeUsageData struct {
+
+ // The number of containers referencing this volume. This field
+ // is set to `-1` if the reference-count is not available.
+ //
+ // Required: true
+ RefCount int64 `json:"RefCount"`
+
+ // Amount of disk space used by the volume (in bytes). This information
+ // is only available for volumes created with the `"local"` volume
+ // driver. For volumes created with other volume drivers, this field
+ // is set to `-1` ("not available")
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
new file mode 100644
index 000000000..9f70e43ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
@@ -0,0 +1,29 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// VolumesCreateBody volumes create body
+// swagger:model VolumesCreateBody
+type VolumesCreateBody struct {
+
+ // Name of the volume driver to use.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
+ // Required: true
+ DriverOpts map[string]string `json:"DriverOpts"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // The new volume's name. If not specified, Docker generates a name.
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
new file mode 100644
index 000000000..833dad933
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
@@ -0,0 +1,23 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+import "github.com/docker/docker/api/types"
+
+// VolumesListOKBody volumes list o k body
+// swagger:model VolumesListOKBody
+type VolumesListOKBody struct {
+
+ // List of volumes
+ // Required: true
+ Volumes []*types.Volume `json:"Volumes"`
+
+ // Warnings that occurred when fetching the list of volumes
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md
new file mode 100644
index 000000000..059dfb3ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/README.md
@@ -0,0 +1,35 @@
+# Go client for the Docker Engine API
+
+The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
+
+For example, to list running containers (the equivalent of `docker ps`):
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+)
+
+func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+}
+```
+
+[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
new file mode 100644
index 000000000..ccab115d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/build_prune.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// BuildCachePrune requests the daemon to delete unused cache data
+func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
+ if err := cli.NewVersionError("1.31", "build prune"); err != nil {
+ return nil, err
+ }
+
+ report := types.BuildCachePruneReport{}
+
+ serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return nil, fmt.Errorf("Error retrieving build cache prune report: %v", err)
+ }
+
+ return &report, nil
+}
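A minimal usage sketch for `BuildCachePrune` follows; it assumes the report's `SpaceReclaimed` field, which holds the number of bytes freed:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Ask the daemon to drop unused build cache data.
	report, err := cli.BuildCachePrune(context.Background())
	if err != nil {
		panic(err)
	}
	// SpaceReclaimed is assumed to report the bytes freed.
	fmt.Printf("reclaimed %d bytes of build cache\n", report.SpaceReclaimed)
}
```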
diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go
new file mode 100644
index 000000000..0effe498b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_create.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointCreate creates a checkpoint from the given container with the given name
+func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
+ resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go
new file mode 100644
index 000000000..e6e75588b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointDelete deletes the checkpoint with the given name from the given container
+func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
new file mode 100644
index 000000000..ffe44bc97
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// CheckpointList returns the checkpoints of the given container in the docker host
+func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
+ var checkpoints []types.Checkpoint
+
+ query := url.Values{}
+ if options.CheckpointDir != "" {
+ query.Set("dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return checkpoints, containerNotFoundError{container}
+ }
+ return checkpoints, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&checkpoints)
+ ensureReaderClosed(resp)
+ return checkpoints, err
+}
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
new file mode 100644
index 000000000..c4e3914b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -0,0 +1,314 @@
+/*
+Package client is a Go client for the Docker Engine API.
+
+The "docker" command uses this package to communicate with the daemon. It can also
+be used by your own Go applications to do anything the command-line interface does
+- running containers, pulling images, managing swarms, etc.
+
+For more information about the Engine API, see the documentation:
+https://docs.docker.com/engine/reference/api/
+
+Usage
+
+You use the library by creating a client object and calling methods on it. The
+client can be created either from environment variables with NewEnvClient, or
+configured manually with NewClient.
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+ package main
+
+ import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+ )
+
+ func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+ }
+
+*/
+package client
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "golang.org/x/net/context"
+)
+
+// ErrRedirect is the error returned by CheckRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // scheme sets the scheme for the client
+ scheme string
+ // host holds the server address to connect to
+ host string
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // client used to send and receive http requests.
+ client *http.Client
+ // version of the server to talk to.
+ version string
+ // custom http headers configured by users.
+ customHTTPHeaders map[string]string
+ // manualOverride is set to true when the version was set by users.
+ manualOverride bool
+}
+
+// CheckRedirect specifies the policy for dealing with redirect responses:
+// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
+//
+// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
+// The Docker client (and by extension docker API client) can be made to send a request
+// like POST /containers//start where what would normally be in the name section of the URL is empty.
+// This triggers an HTTP 301 from the daemon.
+// In Go 1.8 this 301 is converted to a GET request, which ends up getting a 404 from the daemon.
+// This behavior change manifests in the client: previously the 301 was not followed and the client
+// did not generate an error, but now it results in a message like "Error response from daemon: page not found".
+func CheckRedirect(req *http.Request, via []*http.Request) error {
+ if via[0].Method == http.MethodGet {
+ return http.ErrUseLastResponse
+ }
+ return ErrRedirect
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the URL of the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach; leave empty for the latest.
+// Use DOCKER_CERT_PATH to set the path from which to load the TLS certificates.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification; off by default.
+func NewEnvClient() (*Client, error) {
+ var client *http.Client
+ if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+ options := tlsconfig.Options{
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+ InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+ }
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ host := os.Getenv("DOCKER_HOST")
+ if host == "" {
+ host = DefaultDockerHost
+ }
+ version := os.Getenv("DOCKER_API_VERSION")
+ if version == "" {
+ version = api.DefaultVersion
+ }
+
+ cli, err := NewClient(host, version, client, nil)
+ if err != nil {
+ return cli, err
+ }
+ if os.Getenv("DOCKER_API_VERSION") != "" {
+ cli.manualOverride = true
+ }
+ return cli, nil
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+ proto, addr, basePath, err := ParseHost(host)
+ if err != nil {
+ return nil, err
+ }
+
+ if client != nil {
+ if _, ok := client.Transport.(http.RoundTripper); !ok {
+ return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+ }
+ } else {
+ transport := new(http.Transport)
+ sockets.ConfigureTransport(transport, proto, addr)
+ client = &http.Client{
+ Transport: transport,
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ scheme := "http"
+ tlsConfig := resolveTLSConfig(client.Transport)
+ if tlsConfig != nil {
+ // TODO(stevvooe): This isn't really the right way to write clients in Go.
+ // `NewClient` should probably only take an `*http.Client` and work from there.
+ // Unfortunately, the model of having a host-ish/url-thingy as the connection
+ // string has us confusing protocol and transport layers. We continue doing
+ // this to avoid breaking existing clients but this should be addressed.
+ scheme = "https"
+ }
+
+ return &Client{
+ scheme: scheme,
+ host: host,
+ proto: proto,
+ addr: addr,
+ basePath: basePath,
+ client: client,
+ version: version,
+ customHTTPHeaders: httpHeaders,
+ }, nil
+}
+
+// Close closes the client's idle HTTP connections. This is
+// especially needed when NewClient was called with a nil *http.Client,
+// for example:
+// client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})
+func (cli *Client) Close() error {
+
+ if t, ok := cli.client.Transport.(*http.Transport); ok {
+ t.CloseIdleConnections()
+ }
+
+ return nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+ var apiPath string
+ if cli.version != "" {
+ v := strings.TrimPrefix(cli.version, "v")
+ apiPath = cli.basePath + "/v" + v + p
+ } else {
+ apiPath = cli.basePath + p
+ }
+
+ u := &url.URL{
+ Path: apiPath,
+ }
+ if len(query) > 0 {
+ u.RawQuery = query.Encode()
+ }
+ return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+// This operation doesn't acquire a mutex.
+func (cli *Client) ClientVersion() string {
+ return cli.version
+}
+
+// NegotiateAPIVersion updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
+ ping, _ := cli.Ping(ctx)
+ cli.NegotiateAPIVersionPing(ping)
+}
+
+// NegotiateAPIVersionPing updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
+ if cli.manualOverride {
+ return
+ }
+
+ // try the latest version before versioning headers existed
+ if p.APIVersion == "" {
+ p.APIVersion = "1.24"
+ }
+
+ // if the client is not initialized with a version, start with the latest supported version
+ if cli.version == "" {
+ cli.version = api.DefaultVersion
+ }
+
+ // if server version is lower than the maximum version supported by the Client, downgrade
+ if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+ cli.version = p.APIVersion
+ }
+}
+
+// DaemonHost returns the host associated with this instance of the Client.
+// This operation doesn't acquire a mutex.
+func (cli *Client) DaemonHost() string {
+ return cli.host
+}
+
+// ParseHost parses the given host string into its protocol, address, and base path, and verifies that it is valid.
+func ParseHost(host string) (string, string, string, error) {
+ protoAddrParts := strings.SplitN(host, "://", 2)
+ if len(protoAddrParts) == 1 {
+ return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ proto, addr := protoAddrParts[0], protoAddrParts[1]
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", "", "", err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return proto, addr, basePath, nil
+}
+
+// CustomHTTPHeaders returns the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) CustomHTTPHeaders() map[string]string {
+ m := make(map[string]string)
+ for k, v := range cli.customHTTPHeaders {
+ m[k] = v
+ }
+ return m
+}
+
+// SetCustomHTTPHeaders updates the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
+ cli.customHTTPHeaders = headers
+}
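Beyond `NewEnvClient`, the client can be constructed manually and then negotiated down to the daemon's API version; a sketch using the functions defined above (host, version, and header values are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Illustrative host, version, and headers; a nil *http.Client lets
	// NewClient build its own transport for the unix socket.
	headers := map[string]string{"User-Agent": "engine-api-cli-1.0"}
	cli, err := client.NewClient("unix:///var/run/docker.sock", "1.30", nil, headers)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Downgrade to the daemon's API version if it is older than ours.
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("using API version", cli.ClientVersion())
}
```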
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 000000000..89de892c8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd darwin
+
+package client
+
+// DefaultDockerHost defines the OS-specific default host to use if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 000000000..07c0c7a77
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines the OS-specific default host to use if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 000000000..bc4a952b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigCreate creates a new Config.
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
+ var response types.ConfigCreateResponse
+ if err := cli.NewVersionError("1.30", "config create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
new file mode 100644
index 000000000..ebb6d636c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigInspectWithRaw returns the config information with raw data
+func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
+ if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
+ return swarm.Config{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Config{}, nil, configNotFoundError{id}
+ }
+ return swarm.Config{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+
+ var config swarm.Config
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&config)
+
+ return config, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
new file mode 100644
index 000000000..8483ca14d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigList returns the list of configs.
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
+ if err := cli.NewVersionError("1.30", "config list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/configs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var configs []swarm.Config
+ err = json.NewDecoder(resp.body).Decode(&configs)
+ ensureReaderClosed(resp)
+ return configs, err
+}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
new file mode 100644
index 000000000..726b5c853
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ConfigRemove removes a Config.
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 000000000..823751bb8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigUpdate attempts to update a Config
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+ if err := cli.NewVersionError("1.30", "config update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 000000000..0fdf3ed0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+ query := url.Values{}
+ if options.Stream {
+ query.Set("stream", "1")
+ }
+ if options.Stdin {
+ query.Set("stdin", "1")
+ }
+ if options.Stdout {
+ query.Set("stdout", "1")
+ }
+ if options.Stderr {
+ query.Set("stderr", "1")
+ }
+ if options.DetachKeys != "" {
+ query.Set("detachKeys", options.DetachKeys)
+ }
+ if options.Logs {
+ query.Set("logs", "1")
+ }
+
+ headers := map[string][]string{"Content-Type": {"text/plain"}}
+ return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
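A sketch of attaching to a non-TTY container and demultiplexing the stream with `stdcopy.StdCopy`, as the comment above suggests; the container name is a placeholder:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// "my-container" is a placeholder for an existing, non-TTY container.
	resp, err := cli.ContainerAttach(context.Background(), "my-container", types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// Demultiplex the stream into stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```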
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
new file mode 100644
index 000000000..531d796ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_commit.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerCommit creates a new tagged image from a container's changes.
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
+ var repository, tag string
+ if options.Reference != "" {
+ ref, err := reference.ParseNormalizedNamed(options.Reference)
+ if err != nil {
+ return types.IDResponse{}, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ repository = reference.FamiliarName(ref)
+ }
+
+ query := url.Values{}
+ query.Set("container", container)
+ query.Set("repo", repository)
+ query.Set("tag", tag)
+ query.Set("comment", options.Comment)
+ query.Set("author", options.Author)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+	if !options.Pause {
+ query.Set("pause", "0")
+ }
+
+ var response types.IDResponse
+ resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
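A usage sketch for `ContainerCommit`; the container name and image reference are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Commit a placeholder container to a new tagged image.
	res, err := cli.ContainerCommit(context.Background(), "my-container", types.ContainerCommitOptions{
		Reference: "example/snapshot:latest",
		Comment:   "state after setup",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("new image ID:", res.ID)
}
```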
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
new file mode 100644
index 000000000..30ba6803f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_copy.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ urlStr := "/containers/" + containerID + "/archive"
+ response, err := cli.head(ctx, urlStr, query, nil)
+ if err != nil {
+ return types.ContainerPathStat{}, err
+ }
+ defer ensureReaderClosed(response)
+ return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+// Note that `content` must be a Reader for a tar archive.
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ if !options.AllowOverwriteDirWithFile {
+ query.Set("noOverwriteDirNonDir", "true")
+ }
+
+ if options.CopyUIDGID {
+ query.Set("copyUIDGID", "true")
+ }
+
+ apiPath := "/containers/" + container + "/archive"
+
+ response, err := cli.putRaw(ctx, apiPath, query, content, nil)
+ if err != nil {
+ return err
+ }
+ defer ensureReaderClosed(response)
+
+ if response.statusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+ query := make(url.Values, 1)
+ query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+ apiPath := "/containers/" + container + "/archive"
+ response, err := cli.get(ctx, apiPath, query, nil)
+ if err != nil {
+ return nil, types.ContainerPathStat{}, err
+ }
+
+ if response.statusCode != http.StatusOK {
+ return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ // In order to get the copy behavior right, we need to know information
+ // about both the source and the destination. The response headers include
+ // stat info about the source that we can use in deciding exactly how to
+ // copy it locally. Along with the stat info about the local destination,
+ // we have everything we need to handle the multiple possibilities there
+ // can be when copying a file/dir from one location to another file/dir.
+ stat, err := getContainerPathStatFromHeader(response.header)
+ if err != nil {
+ return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+ }
+ return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+ var stat types.ContainerPathStat
+
+ encodedStat := header.Get("X-Docker-Container-Path-Stat")
+ statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+ err := json.NewDecoder(statDecoder).Decode(&stat)
+ if err != nil {
+ err = fmt.Errorf("unable to decode container path stat header: %s", err)
+ }
+
+ return stat, err
+}
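Since `CopyFromContainer` returns a tar stream, a caller typically walks it with `archive/tar`; a sketch with placeholder container and path:

```go
package main

import (
	"archive/tar"
	"context"
	"fmt"
	"io"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Container name and source path are placeholders.
	rdr, stat, err := cli.CopyFromContainer(context.Background(), "my-container", "/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer rdr.Close()
	fmt.Printf("source is %d bytes, mode %v\n", stat.Size, stat.Mode)

	// The body is a tar archive; walk its entries.
	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("entry:", hdr.Name)
	}
}
```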
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
new file mode 100644
index 000000000..6841b0b28
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+type configWrapper struct {
+ *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+ var response container.ContainerCreateCreatedBody
+
+ if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+ return response, err
+ }
+
+ // When using API 1.24 and under, the client is responsible for removing the container
+ if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
+ hostConfig.AutoRemove = false
+ }
+
+ query := url.Values{}
+ if containerName != "" {
+ query.Set("name", containerName)
+ }
+
+ body := configWrapper{
+ Config: config,
+ HostConfig: hostConfig,
+ NetworkingConfig: networkingConfig,
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+ if err != nil {
+ if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+ return response, imageNotFoundError{config.Image}
+ }
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
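A typical create-then-start sequence built on `ContainerCreate`; it assumes the image is already present locally:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// The image is assumed to be present locally (e.g. pulled beforehand).
	created, err := cli.ContainerCreate(ctx, &container.Config{
		Image: "alpine",
		Cmd:   []string{"echo", "hello"},
	}, nil, nil, "demo-container")
	if err != nil {
		panic(err)
	}

	if err := cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("started", created.ID[:10])
}
```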
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
new file mode 100644
index 000000000..884dc9fee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
+ var changes []container.ContainerChangeResponseItem
+
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+ if err != nil {
+ return changes, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&changes)
+ ensureReaderClosed(serverResp)
+ return changes, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
new file mode 100644
index 000000000..0665c54fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+ var response types.IDResponse
+
+ if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+ return response, err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+ resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+ var response types.ContainerExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go
new file mode 100644
index 000000000..52194f3d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return serverResp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
new file mode 100644
index 000000000..17f180974
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_inspect.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, err
+ }
+
+ var response types.ContainerJSON
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ContainerJSON{}, nil, err
+ }
+
+ var response types.ContainerJSON
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go
new file mode 100644
index 000000000..29f80c73a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+ query := url.Values{}
+ query.Set("signal", signal)
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
new file mode 100644
index 000000000..439891219
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_list.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainerList returns the list of containers in the docker host.
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ query := url.Values{}
+
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ if options.Limit != -1 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Since != "" {
+ query.Set("since", options.Since)
+ }
+
+ if options.Before != "" {
+ query.Set("before", options.Before)
+ }
+
+ if options.Size {
+ query.Set("size", "1")
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/containers/json", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []types.Container
+ err = json.NewDecoder(resp.body).Decode(&containers)
+ ensureReaderClosed(resp)
+ return containers, err
+}
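A sketch showing how `filters.Args` feed the `filters` query parameter encoded above; here listing only exited containers:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// List all containers (running or not) whose status is "exited".
	f := filters.NewArgs()
	f.Add("status", "exited")
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		All:     true,
		Filters: f,
	})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:10], c.Status)
	}
}
```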
diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go
new file mode 100644
index 000000000..0f32e9f12
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_logs.go
@@ -0,0 +1,72 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
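A sketch of reading logs from a non-TTY container and demultiplexing them, per the format described above; the container name and tail size are placeholders:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// "my-container" is a placeholder; Tail limits output to the last lines.
	rdr, err := cli.ContainerLogs(context.Background(), "my-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
	})
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	// For non-TTY containers the stream is multiplexed; demultiplex it.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rdr); err != nil {
		panic(err)
	}
}
```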
diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go
new file mode 100644
index 000000000..412067a78
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go
new file mode 100644
index 000000000..b58217086
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainersPrune requests the daemon to delete unused data
+func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
+ var report types.ContainersPruneReport
+
+ if err := cli.NewVersionError("1.25", "container prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+		return report, fmt.Errorf("Error retrieving containers prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
new file mode 100644
index 000000000..3a79590ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_remove.go
@@ -0,0 +1,27 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+ query := url.Values{}
+ if options.RemoveVolumes {
+ query.Set("v", "1")
+ }
+ if options.RemoveLinks {
+ query.Set("link", "1")
+ }
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 000000000..0e718da7c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+ query := url.Values{}
+ query.Set("name", newContainerName)
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
new file mode 100644
index 000000000..66c3cc194
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+ query := url.Values{}
+ query.Set("h", strconv.Itoa(int(height)))
+ query.Set("w", strconv.Itoa(int(width)))
+
+ resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
new file mode 100644
index 000000000..74d7455f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
new file mode 100644
index 000000000..b1f08de41
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_start.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
+ query := url.Values{}
+ if len(options.CheckpointID) != 0 {
+ query.Set("checkpoint", options.CheckpointID)
+ }
+ if len(options.CheckpointDir) != 0 {
+ query.Set("checkpoint-dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
new file mode 100644
index 000000000..4758c66e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stats.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+ query := url.Values{}
+ query.Set("stream", "0")
+ if stream {
+ query.Set("stream", "1")
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return types.ContainerStats{}, err
+ }
+
+ osType := getDockerOS(resp.header.Get("Server"))
+ return types.ContainerStats{Body: resp.body, OSType: osType}, err
+}
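A sketch of a one-shot stats request, decoding the body into `types.StatsJSON`; the container name is a placeholder:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// One-shot (non-streaming) stats for a placeholder container.
	stats, err := cli.ContainerStats(context.Background(), "my-container", false)
	if err != nil {
		panic(err)
	}
	defer stats.Body.Close()

	var v types.StatsJSON
	if err := json.NewDecoder(stats.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("memory usage: %d bytes\n", v.MemoryStats.Usage)
}
```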
diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go
new file mode 100644
index 000000000..b5418ae8c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_stop.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerStop stops a container. The call blocks until the container
+// stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go
new file mode 100644
index 000000000..9689123a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) {
+ var response container.ContainerTopOKBody
+ query := url.Values{}
+ if len(arguments) > 0 {
+ query.Set("ps_args", strings.Join(arguments, " "))
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go
new file mode 100644
index 000000000..5c7621125
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go
new file mode 100644
index 000000000..5082f22df
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
+ var response container.ContainerUpdateOKBody
+ serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go
new file mode 100644
index 000000000..854c6c053
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_wait.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerWait waits until the specified container is in a certain state
+// indicated by the given condition, either "not-running" (default),
+// "next-exit", or "removed".
+//
+// If this client's API version is before 1.30, condition is ignored and
+// ContainerWait will return immediately with the two channels, as the server
+// will wait as if the condition were "not-running".
+//
+// If this client's API version is at least 1.30, ContainerWait blocks until
+// the request has been acknowledged by the server (with a response header),
+// then returns two channels on which the caller can wait for the exit status
+// of the container or an error if there was a problem either beginning the
+// wait request or in getting the response. This allows the caller to
+// synchronize ContainerWait with other calls, such as specifying a
+// "next-exit" condition before issuing a ContainerStart request.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ if versions.LessThan(cli.ClientVersion(), "1.30") {
+ return cli.legacyContainerWait(ctx, containerID)
+ }
+
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error, 1)
+
+ query := url.Values{}
+ query.Set("condition", string(condition))
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
+ if err != nil {
+ defer ensureReaderClosed(resp)
+ errC <- err
+ return resultC, errC
+ }
+
+ go func() {
+ defer ensureReaderClosed(resp)
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
+
+// legacyContainerWait returns immediately and doesn't have an option to wait
+// until the container is removed.
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error)
+
+ go func() {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+ if err != nil {
+ errC <- err
+ return
+ }
+ defer ensureReaderClosed(resp)
+
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
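A sketch of the synchronization pattern described above: register a "next-exit" wait before issuing ContainerStart, then select on the two channels; the container name is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Register the wait *before* starting so the next exit cannot be missed.
	resultC, errC := cli.ContainerWait(ctx, "my-container", container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, "my-container", types.ContainerStartOptions{}); err != nil {
		panic(err)
	}

	select {
	case res := <-resultC:
		fmt.Println("exit code:", res.StatusCode)
	case err := <-errC:
		panic(err)
	}
}
```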
diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go
new file mode 100644
index 000000000..03c80b39a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/disk_usage.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// DiskUsage requests the current data usage from the daemon
+func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
+ var du types.DiskUsage
+
+ serverResp, err := cli.get(ctx, "/system/df", nil, nil)
+ if err != nil {
+ return du, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
+ return du, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return du, nil
+}
diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go
new file mode 100644
index 000000000..aa5bc6a6c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/distribution_inspect.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ registrytypes "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// DistributionInspect returns the image digest with the full manifest.
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
+ // Contact the registry to retrieve digest and platform information
+ var distributionInspect registrytypes.DistributionInspect
+
+ if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
+ return distributionInspect, err
+ }
+ var headers map[string][]string
+
+ if encodedRegistryAuth != "" {
+ headers = map[string][]string{
+ "X-Registry-Auth": {encodedRegistryAuth},
+ }
+ }
+
+ resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
+ if err != nil {
+ return distributionInspect, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&distributionInspect)
+ ensureReaderClosed(resp)
+ return distributionInspect, err
+}
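+
+// A hedged sketch of producing encodedRegistryAuth: by convention it is the
+// base64url-encoded JSON of a types.AuthConfig, the same encoding the other
+// X-Registry-Auth callers in this package use. Credentials are placeholders.
+//
+//	buf, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "pass"})
+//	encoded := base64.URLEncoding.EncodeToString(buf)
+//	info, err := cli.DistributionInspect(ctx, "alpine:latest", encoded)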
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
new file mode 100644
index 000000000..fc7df9f1e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when connection failed.
+type errConnectionFailed struct {
+ host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+ if err.host == "" {
+ return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+ }
+ return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by connection failed.
+func IsErrConnectionFailed(err error) bool {
+ _, ok := errors.Cause(err).(errConnectionFailed)
+ return ok
+}
+
+// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+ return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+ error
+ NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused by an
+// object (image, container, network, volume, …) not being found in the docker host.
+func IsErrNotFound(err error) bool {
+ te, ok := err.(notFound)
+ return ok && te.NotFound()
+}
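+
+// A short illustration (assumes a *Client named cli and a context ctx in
+// scope): callers can branch on the NotFound contract without depending on
+// any concrete error type.
+//
+//	if _, err := cli.ContainerInspect(ctx, "mycontainer"); IsErrNotFound(err) {
+//		fmt.Println("container does not exist")
+//	}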
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+ imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+ containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+ networkID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e networkNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+ volumeID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e volumeNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+ cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+ return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// by a failed authentication against a remote registry.
+func IsErrUnauthorized(err error) bool {
+ _, ok := err.(unauthorizedError)
+ return ok
+}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+ nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e nodeNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+ _, ok := err.(nodeNotFoundError)
+ return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+ serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e serviceNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+ _, ok := err.(serviceNotFoundError)
+ return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+ taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e taskNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+ _, ok := err.(taskNotFoundError)
+ return ok
+}
+
+type pluginPermissionDenied struct {
+ name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+ return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+ _, ok := err.(pluginPermissionDenied)
+ return ok
+}
+
+// NewVersionError returns an error if the required API version
+// is newer than the version currently supported by the client.
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
+}
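+
+// Usage sketch: version-gated methods call NewVersionError before issuing a
+// request, as DistributionInspect does. A hypothetical feature requiring API
+// 1.30 would be gated the same way:
+//
+//	if err := cli.NewVersionError("1.30", "some feature"); err != nil {
+//		return err
+//	}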
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+ _, ok := err.(secretNotFoundError)
+ return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+ _, ok := err.(configNotFoundError)
+ return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+ name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 000000000..af47aefa7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events from the daemon. It's up to the caller to close the
+// stream by cancelling the context. Once the stream has been completely read, an io.EOF
+// error is sent over the error channel. If an error is sent, all processing stops. It's up
+// to the caller to reopen the stream in the event of an error, by reinvoking this method.
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
+ messages := make(chan events.Message)
+ errs := make(chan error, 1)
+
+ started := make(chan struct{})
+ go func() {
+ defer close(errs)
+
+ query, err := buildEventsQueryParams(cli.version, options)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+
+ resp, err := cli.get(ctx, "/events", query, nil)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+ defer resp.body.Close()
+
+ decoder := json.NewDecoder(resp.body)
+
+ close(started)
+ for {
+ select {
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ default:
+ var event events.Message
+ if err := decoder.Decode(&event); err != nil {
+ errs <- err
+ return
+ }
+
+ select {
+ case messages <- event:
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ }
+ }
+ }
+ }()
+ <-started
+
+ return messages, errs
+}
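+
+// A consumption sketch (editor's illustration): cancelling the context stops
+// the stream, and io.EOF on the error channel means the daemon closed it.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	msgs, errs := cli.Events(ctx, types.EventsOptions{})
+//	for {
+//		select {
+//		case m := <-msgs:
+//			fmt.Println(m.Type, m.Action, m.Actor.ID)
+//		case err := <-errs:
+//			if err == io.EOF {
+//				return
+//			}
+//			log.Fatal(err)
+//		}
+//	}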
+
+func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
+ query := url.Values{}
+ ref := time.Now()
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("until", ts)
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ return query, nil
+}
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
new file mode 100644
index 000000000..8cf0119f3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -0,0 +1,208 @@
+package client
+
+import (
+ "bufio"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/tlsconfig"
+ "github.com/docker/go-connections/sockets"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// tlsClientCon holds tls information and a dialed connection.
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if conn, ok := c.rawConn.(types.CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+ bodyEncoded, err := encodeData(body)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ apiPath := cli.getAPIPath(path, query)
+ req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ conn, err := cli.setupHijackConn(req, "tcp")
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
+}
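+
+// A hedged usage sketch of the hijacked-connection path: callers normally
+// reach it through ContainerAttach (declared in interface.go below), and the
+// half-close via CloseWrite is exactly what the tlsClientCon wrapper above
+// enables over TLS. cli, ctx and "mycontainer" are placeholders.
+//
+//	resp, err := cli.ContainerAttach(ctx, "mycontainer", types.ContainerAttachOptions{
+//		Stream: true, Stdin: true, Stdout: true,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Close()
+//	io.WriteString(resp.Conn, "exit\n") // send stdin
+//	resp.CloseWrite()                   // half-close: no more input
+//	io.Copy(os.Stdout, resp.Reader)     // drain the remaining output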
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+ if err != nil {
+ return nil, err
+ }
+
+ rawConn, err := proxyDialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ETIMEDOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection prevents this,
+ // unless the socket connection truly is broken.
+ if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ config = tlsconfig.Clone(config)
+ config.ServerName = hostname
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we deviate from the standard crypto/tls package: we return a
+ // wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+ if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+ // Note that this isn't the standard library's tls.Dial function
+ return tlsDial(proto, addr, tlsConfig)
+ }
+ if proto == "npipe" {
+ return sockets.DialPipe(addr, 32*time.Second)
+ }
+ return net.Dial(proto, addr)
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
+ req.Host = cli.addr
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", proto)
+
+ conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+ if err != nil {
+ return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ETIMEDOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection prevents this,
+ // unless the socket connection truly is broken.
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ clientconn := httputil.NewClientConn(conn, nil)
+ defer clientconn.Close()
+
+ // The server hijacks the connection; a 'connection closed' error is expected
+ resp, err := clientconn.Do(req)
+ if err != httputil.ErrPersistEOF {
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ resp.Body.Close()
+ return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+ }
+ }
+
+ c, br := clientconn.Hijack()
+ if br.Buffered() > 0 {
+ // If there is buffered content, wrap the connection
+ c = &hijackedConn{c, br}
+ } else {
+ br.Reset(nil)
+ }
+
+ return c, nil
+}
+
+type hijackedConn struct {
+ net.Conn
+ r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 000000000..44a215f90
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,128 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser, and it's up to the caller
+// to close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+ query, err := cli.imageBuildOptionsToQuery(options)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ headers := http.Header(make(map[string][]string))
+ buf, err := json.Marshal(options.AuthConfigs)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+ headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+ headers.Set("Content-Type", "application/x-tar")
+
+ serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ osType := getDockerOS(serverResp.header.Get("Server"))
+
+ return types.ImageBuildResponse{
+ Body: serverResp.body,
+ OSType: osType,
+ }, nil
+}
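+
+// A build sketch (assumes buildCtx is an io.Reader over a tar archive of the
+// build context; producing that tar is left to the caller):
+//
+//	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
+//		Tags:       []string{"example/app:latest"},
+//		Dockerfile: "Dockerfile",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()
+//	io.Copy(os.Stdout, resp.Body) // stream the JSON build progress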
+
+func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+ query := url.Values{
+ "t": options.Tags,
+ "securityopt": options.SecurityOpt,
+ "extrahosts": options.ExtraHosts,
+ }
+ if options.SuppressOutput {
+ query.Set("q", "1")
+ }
+ if options.RemoteContext != "" {
+ query.Set("remote", options.RemoteContext)
+ }
+ if options.NoCache {
+ query.Set("nocache", "1")
+ }
+ if options.Remove {
+ query.Set("rm", "1")
+ } else {
+ query.Set("rm", "0")
+ }
+
+ if options.ForceRemove {
+ query.Set("forcerm", "1")
+ }
+
+ if options.PullParent {
+ query.Set("pull", "1")
+ }
+
+ if options.Squash {
+ if err := cli.NewVersionError("1.25", "squash"); err != nil {
+ return query, err
+ }
+ query.Set("squash", "1")
+ }
+
+ if !container.Isolation.IsDefault(options.Isolation) {
+ query.Set("isolation", string(options.Isolation))
+ }
+
+ query.Set("cpusetcpus", options.CPUSetCPUs)
+ query.Set("networkmode", options.NetworkMode)
+ query.Set("cpusetmems", options.CPUSetMems)
+ query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+ query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+ query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+ query.Set("memory", strconv.FormatInt(options.Memory, 10))
+ query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+ query.Set("cgroupparent", options.CgroupParent)
+ query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+ query.Set("dockerfile", options.Dockerfile)
+ query.Set("target", options.Target)
+
+ ulimitsJSON, err := json.Marshal(options.Ulimits)
+ if err != nil {
+ return query, err
+ }
+ query.Set("ulimits", string(ulimitsJSON))
+
+ buildArgsJSON, err := json.Marshal(options.BuildArgs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("buildargs", string(buildArgsJSON))
+
+ labelsJSON, err := json.Marshal(options.Labels)
+ if err != nil {
+ return query, err
+ }
+ query.Set("labels", string(labelsJSON))
+
+ cacheFromJSON, err := json.Marshal(options.CacheFrom)
+ if err != nil {
+ return query, err
+ }
+ query.Set("cachefrom", string(cacheFromJSON))
+ if options.SessionID != "" {
+ query.Set("session", options.SessionID)
+ }
+
+ return query, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
new file mode 100644
index 000000000..4436abb0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(parentReference)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
new file mode 100644
index 000000000..7b4babcba
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+ "golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
+ var history []image.HistoryResponseItem
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+ if err != nil {
+ return history, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&history)
+ ensureReaderClosed(serverResp)
+ return history, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go
new file mode 100644
index 000000000..d7dedd823
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+ if ref != "" {
+ // Check if the given image name can be resolved
+ if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{}
+ query.Set("fromSrc", source.SourceName)
+ query.Set("repo", ref)
+ query.Set("tag", options.Tag)
+ query.Set("message", options.Message)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
new file mode 100644
index 000000000..b3a64ce2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ImageInspect{}, nil, imageNotFoundError{imageID}
+ }
+ return types.ImageInspect{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ImageInspect{}, nil, err
+ }
+
+ var response types.ImageInspect
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
new file mode 100644
index 000000000..f26464f67
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_list.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// ImageList returns a list of images in the docker host.
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) {
+ var images []types.ImageSummary
+ query := url.Values{}
+
+ optionFilters := options.Filters
+ referenceFilters := optionFilters.Get("reference")
+ if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
+ query.Set("filter", referenceFilters[0])
+ for _, filterValue := range referenceFilters {
+ optionFilters.Del("reference", filterValue)
+ }
+ }
+ if optionFilters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
+ if err != nil {
+ return images, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ serverResp, err := cli.get(ctx, "/images/json", query, nil)
+ if err != nil {
+ return images, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&images)
+ ensureReaderClosed(serverResp)
+ return images, err
+}
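+
+// A filtering sketch: on API 1.25 and newer the reference filter travels in
+// the "filters" parameter; on older daemons the code above rewrites it into
+// the legacy "filter" parameter.
+//
+//	args := filters.NewArgs()
+//	args.Add("reference", "alpine")
+//	images, err := cli.ImageList(ctx, types.ImageListOptions{Filters: args})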
diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go
new file mode 100644
index 000000000..77aaf1af3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ImageLoad loads an image into the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// ImageLoadResponse returned by this function.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
+ v := url.Values{}
+ v.Set("quiet", "0")
+ if quiet {
+ v.Set("quiet", "1")
+ }
+ headers := map[string][]string{"Content-Type": {"application/x-tar"}}
+ resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
+ if err != nil {
+ return types.ImageLoadResponse{}, err
+ }
+ return types.ImageLoadResponse{
+ Body: resp.body,
+ JSON: resp.header.Get("Content-Type") == "application/json",
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go
new file mode 100644
index 000000000..5ef98b7f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ImagesPrune requests the daemon to delete unused data
+func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
+ var report types.ImagesPruneReport
+
+ if err := cli.NewVersionError("1.25", "image prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
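+
+// A prune sketch using the daemon's standard "dangling" filter:
+//
+//	args := filters.NewArgs()
+//	args.Add("dangling", "true")
+//	report, err := cli.ImagesPrune(ctx, args)
+//	if err == nil {
+//		fmt.Println("reclaimed bytes:", report.SpaceReclaimed)
+//	}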
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
new file mode 100644
index 000000000..a72b9bf7f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_pull.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// It executes the privileged function if the operation is unauthorized,
+// and then retries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker:
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ if !options.All {
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ }
+
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
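+
+// A pull sketch: the returned stream carries JSON progress messages, and the
+// pull is not guaranteed to complete until the body has been read and closed.
+//
+//	rc, err := cli.ImagePull(ctx, "docker.io/library/alpine:latest", types.ImagePullOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc)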
+
+// getAPITagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api expects
+// digests to be sent as tags and makes a distinction between the name
+// and tag/digest part of a reference.
+func getAPITagFromNamedRef(ref reference.Named) string {
+ if digested, ok := ref.(reference.Digested); ok {
+ return digested.Digest().String()
+ }
+ ref = reference.TagNameOnly(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ return tagged.Tag()
+ }
+ return ""
+}
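+
+// For illustration, the mapping behaves roughly like this (digest elided):
+//
+//	alpine          -> "latest" (TagNameOnly fills in the default tag)
+//	alpine:3.6      -> "3.6"
+//	alpine@sha256:… -> "sha256:…" (the digest is sent in the tag field)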
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
new file mode 100644
index 000000000..410d2fb91
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_push.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized,
+// and then retries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return nil, errors.New("cannot push a digest reference")
+ }
+
+ tag := ""
+ name := reference.FamiliarName(ref)
+
+ if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
+ tag = nameTaggedRef.Tag()
+ }
+
+ query := url.Values{}
+ query.Set("tag", tag)
+
+ resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
new file mode 100644
index 000000000..6921209ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
+ query := url.Values{}
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+ if !options.PruneChildren {
+ query.Set("noprune", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var dels []types.ImageDeleteResponseItem
+ err = json.NewDecoder(resp.body).Decode(&dels)
+ ensureReaderClosed(resp)
+ return dels, err
+}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
new file mode 100644
index 000000000..ecac880a3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+ query := url.Values{
+ "names": imageIDs,
+ }
+
+ resp, err := cli.get(ctx, "/images/get", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
new file mode 100644
index 000000000..b0fcd5c23
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_search.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search for a term in a remote registry.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
+ var results []registry.SearchResult
+ query := url.Values{}
+ query.Set("term", term)
+ query.Set("limit", fmt.Sprintf("%d", options.Limit))
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return results, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return results, privilegeErr
+ }
+ resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return results, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&results)
+ ensureReaderClosed(resp)
+ return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/images/search", query, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
new file mode 100644
index 000000000..8924f71eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_tag.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
+ if _, err := reference.ParseAnyReference(source); err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(target)
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return errors.New("refusing to create a tag with a digest reference")
+ }
+
+ ref = reference.TagNameOnly(ref)
+
+ query := url.Values{}
+ query.Set("repo", reference.FamiliarName(ref))
+ if tagged, ok := ref.(reference.Tagged); ok {
+ query.Set("tag", tagged.Tag())
+ }
+
+ resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
new file mode 100644
index 000000000..ac0796122
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/info.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (types.Info, error) {
+ var info types.Info
+ serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
+ if err != nil {
+ return info, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
+ return info, fmt.Errorf("Error reading remote info: %v", err)
+ }
+
+ return info, nil
+}
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
new file mode 100644
index 000000000..acd4de1db
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface.go
@@ -0,0 +1,194 @@
+package client
+
+import (
+ "io"
+ "net"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
+type CommonAPIClient interface {
+ ConfigAPIClient
+ ContainerAPIClient
+ DistributionAPIClient
+ ImageAPIClient
+ NodeAPIClient
+ NetworkAPIClient
+ PluginAPIClient
+ ServiceAPIClient
+ SwarmAPIClient
+ SecretAPIClient
+ SystemAPIClient
+ VolumeAPIClient
+ ClientVersion() string
+ DaemonHost() string
+ ServerVersion(ctx context.Context) (types.Version, error)
+ NegotiateAPIVersion(ctx context.Context)
+ NegotiateAPIVersionPing(types.Ping)
+ DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
+ ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
+ ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
+ ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error)
+ ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error)
+ ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+ ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
+ ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
+ ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
+ ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
+ ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
+ ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
+ ContainerKill(ctx context.Context, container, signal string) error
+ ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ ContainerPause(ctx context.Context, container string) error
+ ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
+ ContainerRename(ctx context.Context, container, newContainerName string) error
+ ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
+ ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
+ ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
+ ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
+ ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error)
+ ContainerUnpause(ctx context.Context, container string) error
+ ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error)
+ ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error)
+ CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+ CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
+ ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
+}
+
+// DistributionAPIClient defines API client methods for the registry
+type DistributionAPIClient interface {
+ DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
+ ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+ BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
+ ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
+ ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
+ ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
+ ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
+ ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
+ ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
+ ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
+ ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
+ ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
+ ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
+ ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
+ ImageTag(ctx context.Context, image, ref string) error
+ ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
+ NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
+ NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
+ NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
+ NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error)
+ NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
+ NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
+ NetworkRemove(ctx context.Context, networkID string) error
+ NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+ NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
+ PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
+ PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
+ PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
+ PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
+ PluginSet(ctx context.Context, name string, args []string) error
+ PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
+ PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
+ ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
+ SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+ Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
+ Info(ctx context.Context) (types.Info, error)
+ RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
+ DiskUsage(ctx context.Context) (types.DiskUsage, error)
+ Ping(ctx context.Context) (types.Ping, error)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
+ VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error)
+ VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
+ VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
+ VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error)
+ VolumeRemove(ctx context.Context, volumeID string, force bool) error
+ VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
+}
+
+// SecretAPIClient defines API client methods for secrets
+type SecretAPIClient interface {
+ SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
+ SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
+ SecretRemove(ctx context.Context, id string) error
+ SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
+ SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
+}
+
+// ConfigAPIClient defines API client methods for configs
+type ConfigAPIClient interface {
+ ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
+ ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
+ ConfigRemove(ctx context.Context, id string) error
+ ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
+ ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
+}
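+
+// A design note with a sketch: accepting one of the narrow interfaces above,
+// rather than *Client, keeps calling code testable, since a fake only has to
+// implement the methods it needs. pullQuietly is a hypothetical helper.
+//
+//	func pullQuietly(ctx context.Context, c ImageAPIClient, ref string) error {
+//		rc, err := c.ImagePull(ctx, ref, types.ImagePullOptions{})
+//		if err != nil {
+//			return err
+//		}
+//		defer rc.Close()
+//		_, err = io.Copy(ioutil.Discard, rc)
+//		return err
+//	}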
diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go
new file mode 100644
index 000000000..51da98ecd
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface_experimental.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+type apiClientExperimental interface {
+ CheckpointAPIClient
+}
+
+// CheckpointAPIClient defines API client methods for the checkpoints
+type CheckpointAPIClient interface {
+ CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
+ CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
+ CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
+}
diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go
new file mode 100644
index 000000000..cc90a3cbb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/interface_stable.go
@@ -0,0 +1,10 @@
+package client
+
+// APIClient is an interface that clients that talk with a docker server must implement.
+type APIClient interface {
+ CommonAPIClient
+ apiClientExperimental
+}
+
+// Ensure that Client always implements APIClient.
+var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
new file mode 100644
index 000000000..79219ff59
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/login.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns an unauthorizedError when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
+ resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+
+ if resp.statusCode == http.StatusUnauthorized {
+ return registry.AuthenticateOKBody{}, unauthorizedError{err}
+ }
+ if err != nil {
+ return registry.AuthenticateOKBody{}, err
+ }
+
+ var response registry.AuthenticateOKBody
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
new file mode 100644
index 000000000..c022c17b5
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_connect.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/network"
+ "golang.org/x/net/context"
+)
+
+// NetworkConnect connects a container to an existing network in the docker host.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+ nc := types.NetworkConnect{
+ Container: containerID,
+ EndpointConfig: config,
+ }
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
new file mode 100644
index 000000000..4067a541f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+ networkCreateRequest := types.NetworkCreateRequest{
+ NetworkCreate: options,
+ Name: name,
+ }
+ var response types.NetworkCreateResponse
+ serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+ if err != nil {
+ return response, err
+ }
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
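A short usage sketch for NetworkCreate (the network name and bridge driver are arbitrary example values, and the client setup is the same NewEnvClient assumption as above):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        resp, err := cli.NetworkCreate(context.Background(), "app-net", types.NetworkCreate{
            Driver:         "bridge",
            CheckDuplicate: true,
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("created network:", resp.ID)
    }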
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
new file mode 100644
index 000000000..24b58e3c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existing network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+ nd := types.NetworkDisconnect{Container: containerID, Force: force}
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
new file mode 100644
index 000000000..848c9799f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_inspect.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
+ networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
+ return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
+ var (
+ networkResource types.NetworkResource
+ resp serverResponse
+ err error
+ )
+ query := url.Values{}
+ if options.Verbose {
+ query.Set("verbose", "true")
+ }
+ if options.Scope != "" {
+ query.Set("scope", options.Scope)
+ }
+ resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return networkResource, nil, networkNotFoundError{networkID}
+ }
+ return networkResource, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return networkResource, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&networkResource)
+ return networkResource, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
new file mode 100644
index 000000000..e566a93e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_list.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
+ query := url.Values{}
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+ var networkResources []types.NetworkResource
+ resp, err := cli.get(ctx, "/networks", query, nil)
+ if err != nil {
+ return networkResources, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&networkResources)
+ ensureReaderClosed(resp)
+ return networkResources, err
+}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
new file mode 100644
index 000000000..7352a7f0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworksPrune requests the daemon to delete unused networks
+func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
+ var report types.NetworksPruneReport
+
+ if err := cli.NewVersionError("1.25", "network prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving network prune report: %v", err)
+ }
+
+ return report, nil
+}
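NetworksPrune requires API version 1.25 or newer, as the NewVersionError guard above enforces. A hedged usage sketch; the "until" filter is assumed to be supported by the daemon in use:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // Prune only networks that have been unused for more than 24h.
        args := filters.NewArgs()
        args.Add("until", "24h")
        report, err := cli.NetworksPrune(context.Background(), args)
        if err != nil {
            panic(err)
        }
        fmt.Println("deleted networks:", report.NetworksDeleted)
    }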
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
new file mode 100644
index 000000000..6bd674892
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/network_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NetworkRemove removes an existing network from the docker host.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+ resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
new file mode 100644
index 000000000..abf505d29
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeInspectWithRaw returns the node information.
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Node{}, nil, nodeNotFoundError{nodeID}
+ }
+ return swarm.Node{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ var response swarm.Node
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
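The WithRaw pattern returns both the decoded struct and the raw JSON, so callers can reach fields the typed struct does not expose. A sketch, where "my-node-id" is a placeholder for a real swarm node ID or name:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        node, raw, err := cli.NodeInspectWithRaw(context.Background(), "my-node-id")
        if err != nil {
            panic(err)
        }
        fmt.Println("hostname:", node.Description.Hostname)

        // Pretty-print the raw representation alongside the typed view.
        var pretty bytes.Buffer
        if err := json.Indent(&pretty, raw, "", "  "); err == nil {
            fmt.Println(pretty.String())
        }
    }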
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
new file mode 100644
index 000000000..3e8440f08
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.body).Decode(&nodes)
+ ensureReaderClosed(resp)
+ return nodes, err
+}
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
new file mode 100644
index 000000000..0a77f3d57
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+
+ "golang.org/x/net/context"
+)
+
+// NodeRemove removes a Node.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
new file mode 100644
index 000000000..3ca976028
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/node_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/parse_logs.go b/vendor/github.com/docker/docker/client/parse_logs.go
new file mode 100644
index 000000000..e427f80a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/parse_logs.go
@@ -0,0 +1,41 @@
+package client
+
+// parse_logs.go contains utility helpers for extracting information from
+// docker log lines. Right now it only contains ParseLogDetails; if there is
+// ever a need to parse log messages back into a struct, that logic would
+// belong here as well.
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ParseLogDetails takes a details string of key-value pairs in the form
+// "k=v,l=w", where the keys and values are URL query escaped and each pair
+// is separated by a comma, and returns the pairs as a map. It returns an
+// error if the details string is not in a valid format.
+// The exact form of the details encoding is implemented in
+// api/server/httputils/write_log_stream.go.
+func ParseLogDetails(details string) (map[string]string, error) {
+ pairs := strings.Split(details, ",")
+ detailsMap := make(map[string]string, len(pairs))
+ for _, pair := range pairs {
+ p := strings.SplitN(pair, "=", 2)
+ // if there is no equals sign, we will only get 1 part back
+ if len(p) != 2 {
+ return nil, errors.New("invalid details format")
+ }
+ k, err := url.QueryUnescape(p[0])
+ if err != nil {
+ return nil, err
+ }
+ v, err := url.QueryUnescape(p[1])
+ if err != nil {
+ return nil, err
+ }
+ detailsMap[k] = v
+ }
+ return detailsMap, nil
+}
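ParseLogDetails is a pure function, so it can be exercised without a daemon. A small runnable example (the label key and values are made up):

    package main

    import (
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        // Keys and values are URL query escaped; pairs are comma separated.
        m, err := client.ParseLogDetails("com.example.label=web%20frontend,env=prod")
        if err != nil {
            panic(err)
        }
        fmt.Println(m["com.example.label"], m["env"]) // web frontend prod
    }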
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
new file mode 100644
index 000000000..a4c2e2c4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Ping pings the server and returns the values of the "Docker-Experimental", "OSType" and "API-Version" headers
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+ var ping types.Ping
+ req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil)
+ if err != nil {
+ return ping, err
+ }
+ serverResp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return ping, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if serverResp.header != nil {
+ ping.APIVersion = serverResp.header.Get("API-Version")
+
+ if serverResp.header.Get("Docker-Experimental") == "true" {
+ ping.Experimental = true
+ }
+ ping.OSType = serverResp.header.Get("OSType")
+ }
+
+ err = cli.checkResponseErr(serverResp)
+ return ping, err
+}
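Ping is useful as a cheap connectivity and capability probe. A minimal sketch, again assuming a NewEnvClient-constructed client:

    package main

    import (
        "fmt"

        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        p, err := cli.Ping(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Printf("api=%s os=%s experimental=%v\n", p.APIVersion, p.OSType, p.Experimental)
    }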
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
new file mode 100644
index 000000000..27954aa57
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_create.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginCreate creates a plugin
+func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
+ headers := http.Header(make(map[string][]string))
+ headers.Set("Content-Type", "application/x-tar")
+
+ query := url.Values{}
+ query.Set("name", createOptions.RepoName)
+
+ resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
+ if err != nil {
+ return err
+ }
+ ensureReaderClosed(resp)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
new file mode 100644
index 000000000..30467db74
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_disable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginDisable disables a plugin
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
new file mode 100644
index 000000000..95517c4b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_enable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginEnable enables a plugin
+func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
+ query := url.Values{}
+ query.Set("timeout", strconv.Itoa(options.Timeout))
+
+ resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
new file mode 100644
index 000000000..89f39ee2c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginInspectWithRaw inspects an existing plugin
+func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
+ resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return nil, nil, pluginNotFoundError{name}
+ }
+ return nil, nil, err
+ }
+
+ defer ensureReaderClosed(resp)
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return nil, nil, err
+ }
+ var p types.Plugin
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&p)
+ return &p, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
new file mode 100644
index 000000000..ce3e0506e
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_install.go
@@ -0,0 +1,113 @@
+package client
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginInstall installs a plugin
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+	// Set the name for the plugin pull; if empty, the daemon defaults it to the remote reference.
+ query.Set("name", name)
+
+ resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+
+ name = resp.header.Get("Docker-Plugin-Name")
+
+ pr, pw := io.Pipe()
+ go func() { // todo: the client should probably be designed more around the actual api
+ _, err := io.Copy(pw, resp.body)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer func() {
+ if err != nil {
+ delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(delResp)
+ }
+ }()
+ if len(options.Args) > 0 {
+ if err := cli.PluginSet(ctx, name, options.Args); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+
+ if options.Disabled {
+ pw.Close()
+ return
+ }
+
+ enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
+ pw.CloseWithError(enableErr)
+ }()
+ return pr, nil
+}
+
+func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/plugins/privileges", query, headers)
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/pull", query, privileges, headers)
+}
+
+func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
+ resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		// todo: inspect first to check whether the name already exists before checking privileges
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ ensureReaderClosed(resp)
+ return nil, privilegeErr
+ }
+ options.RegistryAuth = newAuthHeader
+ resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ }
+ if err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+
+ var privileges types.PluginPrivileges
+ if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+ ensureReaderClosed(resp)
+
+ if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+ accept, err := options.AcceptPermissionsFunc(privileges)
+ if err != nil {
+ return nil, err
+ }
+ if !accept {
+ return nil, pluginPermissionDenied{options.RemoteRef}
+ }
+ }
+ return privileges, nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
new file mode 100644
index 000000000..3acde3b96
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// PluginList returns the installed plugins
+func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
+ var plugins types.PluginsListResponse
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return plugins, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/plugins", query, nil)
+ if err != nil {
+ return plugins, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&plugins)
+ ensureReaderClosed(resp)
+ return plugins, err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
new file mode 100644
index 000000000..1e5f96325
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_push.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+// PluginPush pushes a plugin to a registry
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
new file mode 100644
index 000000000..b017e4d34
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
new file mode 100644
index 000000000..3260d2a90
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_set.go
@@ -0,0 +1,12 @@
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
new file mode 100644
index 000000000..049ebfa2a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginUpgrade upgrades a plugin
+func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
new file mode 100644
index 000000000..3e7d43fea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -0,0 +1,262 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+ body io.ReadCloser
+ header http.Header
+ statusCode int
+ reqURL *url.URL
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific Go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific Go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
+}
+
+type headers map[string][]string
+
+func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
+ if obj == nil {
+ return nil, headers, nil
+ }
+
+ body, err := encodeData(obj)
+ if err != nil {
+ return nil, headers, err
+ }
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ return body, headers, nil
+}
+
+func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
+ expectedPayload := (method == "POST" || method == "PUT")
+ if expectedPayload && body == nil {
+ body = bytes.NewReader([]byte{})
+ }
+
+ req, err := http.NewRequest(method, path, body)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // For local communications, it doesn't matter what the host is. We just
+ // need a valid and meaningful host name. (See #189)
+ req.Host = "docker"
+ }
+
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.scheme
+
+ if expectedPayload && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+ return req, nil
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
+ req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ resp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return resp, err
+ }
+ if err := cli.checkResponseErr(resp); err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
+ serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
+
+ resp, err := ctxhttp.Do(ctx, cli.client, req)
+ if err != nil {
+ if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+ return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+ }
+
+ if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
+			return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certificate settings: %v", err)
+ }
+
+ // Don't decorate context sentinel errors; users may be comparing to
+ // them directly.
+ switch err {
+ case context.Canceled, context.DeadlineExceeded:
+ return serverResp, err
+ }
+
+ if nErr, ok := err.(*url.Error); ok {
+ if nErr, ok := nErr.Err.(*net.OpError); ok {
+ if os.IsPermission(nErr.Err) {
+ return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
+ }
+ }
+ }
+
+ if err, ok := err.(net.Error); ok {
+ if err.Timeout() {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ if !err.Temporary() {
+ if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ }
+ }
+
+ // Although there's not a strongly typed error for this in go-winio,
+ // lots of people are using the default configuration for the docker
+ // daemon on Windows where the daemon is listening on a named pipe
+		// `//./pipe/docker_engine`, and the client must be running elevated.
+ // Give users a clue rather than the not-overly useful message
+ // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+ // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as
+ // this is localised - for example in French the error would be
+ // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+ if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+ err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
+ }
+
+ return serverResp, errors.Wrap(err, "error during connect")
+ }
+
+ if resp != nil {
+ serverResp.statusCode = resp.StatusCode
+ serverResp.body = resp.Body
+ serverResp.header = resp.Header
+ }
+ return serverResp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+ if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+ return nil
+ }
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return err
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+ }
+
+ var ct string
+ if serverResp.header != nil {
+ ct = serverResp.header.Get("Content-Type")
+ }
+
+ var errorMessage string
+ if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+ var errorResponse types.ErrorResponse
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return fmt.Errorf("Error reading JSON: %v", err)
+ }
+ errorMessage = errorResponse.Message
+ } else {
+ errorMessage = string(body)
+ }
+
+ return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers,
+	// so that the user cannot override the headers we set ourselves.
+ for k, v := range cli.customHTTPHeaders {
+ if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
+ continue
+ }
+ req.Header.Set(k, v)
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
+
+func ensureReaderClosed(response serverResponse) {
+ if response.body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ io.CopyN(ioutil.Discard, response.body, 512)
+ response.body.Close()
+ }
+}
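ensureReaderClosed drains up to 512 bytes before closing so the underlying connection stays eligible for HTTP keep-alive reuse; a body abandoned with unread data forces the Transport to discard the connection instead of returning it to the idle pool. The same concern applies to any net/http client, as this standalone sketch (against a placeholder URL) illustrates:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // example.com is a placeholder endpoint.
        resp, err := http.Get("http://example.com/")
        if err != nil {
            panic(err)
        }
        // Drain a bounded amount, then close, mirroring ensureReaderClosed:
        // a drained and closed body lets the Transport reuse the connection.
        io.CopyN(ioutil.Discard, resp.Body, 512)
        resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }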
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 000000000..4354afea6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretCreate creates a new Secret.
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+ var response types.SecretCreateResponse
+ if err := cli.NewVersionError("1.25", "secret create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
new file mode 100644
index 000000000..9b602972b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretInspectWithRaw returns the secret information with raw data
+func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
+ if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
+ return swarm.Secret{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Secret{}, nil, secretNotFoundError{id}
+ }
+ return swarm.Secret{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+
+ var secret swarm.Secret
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&secret)
+
+ return secret, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
new file mode 100644
index 000000000..0d33ecfbc
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretList returns the list of secrets.
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+ if err := cli.NewVersionError("1.25", "secret list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/secrets", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var secrets []swarm.Secret
+ err = json.NewDecoder(resp.body).Decode(&secrets)
+ ensureReaderClosed(resp)
+ return secrets, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
new file mode 100644
index 000000000..c5e37af17
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// SecretRemove removes a Secret.
+func (cli *Client) SecretRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
new file mode 100644
index 000000000..875a4c901
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretUpdate attempts to update a Secret
+func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+ if err := cli.NewVersionError("1.25", "secret update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
new file mode 100644
index 000000000..a36839443
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -0,0 +1,156 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
+ var distErr error
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+ if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+ service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+ }
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceCreateResponse{}, err
+ }
+
+ // ensure that the image is tagged
+ var imgPlatforms []swarm.Platform
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceCreateResponse
+ resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
+ distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
+ imageWithDigest := image
+ var platforms []swarm.Platform
+ if err != nil {
+ return "", nil, err
+ }
+
+ imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+ if len(distributionInspect.Platforms) > 0 {
+ platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+ for _, p := range distributionInspect.Platforms {
+ platforms = append(platforms, swarm.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ })
+ }
+ }
+ return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// an empty string if there are no updates.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+ // ensure that image gets a default tag if none is provided
+ img, err := reference.WithDigest(namedRef, dgst)
+ if err == nil {
+ return reference.FamiliarString(img)
+ }
+ }
+ }
+ return ""
+}
+
+// imageWithTagString takes an image string and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if the image cannot be parsed as a normalized named reference.
+func imageWithTagString(image string) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ return reference.FamiliarString(reference.TagNameOnly(namedRef))
+ }
+ return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+ return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+ if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+ return errors.New("must not specify both a container spec and a plugin spec in the task template")
+ }
+ if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+ return errors.New("mismatched runtime with plugin spec")
+ }
+ if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+ return errors.New("mismatched runtime with container spec")
+ }
+ return nil
+}
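imageWithTagString and imageWithDigestString are thin wrappers over the vendored reference package. A standalone sketch of the underlying calls; the image name and digest below are made-up examples:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/reference"
        "github.com/opencontainers/go-digest"
    )

    func main() {
        named, err := reference.ParseNormalizedNamed("redis")
        if err != nil {
            panic(err)
        }
        // TagNameOnly adds :latest when no tag is present, which is what
        // imageWithTagString relies on.
        fmt.Println(reference.FamiliarString(reference.TagNameOnly(named))) // redis:latest

        // WithDigest pins the reference, as imageWithDigestString does.
        dgst := digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
        canonical, err := reference.WithDigest(named, dgst)
        if err != nil {
            panic(err)
        }
        fmt.Println(reference.FamiliarString(canonical))
    }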
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 000000000..d7e051e3a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+ query := url.Values{}
+ query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
+ serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Service{}, nil, serviceNotFoundError{serviceID}
+ }
+ return swarm.Service{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ var response swarm.Service
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
new file mode 100644
index 000000000..c29e6d407
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.body).Decode(&services)
+ ensureReaderClosed(resp)
+ return services, err
+}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
new file mode 100644
index 000000000..24384e3ec
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
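A hedged usage sketch for ServiceLogs; "my-service" is a placeholder service ID or name. Note that without a TTY the stream carries multiplexed stdout/stderr frames, so copying it raw is only suitable for a quick look:

    package main

    import (
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        rc, err := cli.ServiceLogs(context.Background(), "my-service", types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Tail:       "100",
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        io.Copy(os.Stdout, rc)
    }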
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
new file mode 100644
index 000000000..a9331f92c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+ resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
new file mode 100644
index 000000000..8764f299a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_update.go
@@ -0,0 +1,92 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
+ var (
+ query = url.Values{}
+ distErr error
+ )
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ if options.RegistryAuthFrom != "" {
+ query.Set("registryAuthFrom", options.RegistryAuthFrom)
+ }
+
+ if options.Rollback != "" {
+ query.Set("rollback", options.Rollback)
+ }
+
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceUpdateResponse{}, err
+ }
+
+ var imgPlatforms []swarm.Platform
+ // ensure that the image is tagged
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceUpdateResponse
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go
new file mode 100644
index 000000000..8ee916213
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/session.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net"
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// DialSession returns a connection that can be used for communication with the daemon.
+func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
+ req, err := http.NewRequest("POST", "/session", nil)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, meta)
+
+ return cli.setupHijackConn(req, proto)
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
new file mode 100644
index 000000000..be28d3262
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// SwarmGetUnlockKey retrieves the swarm's unlock key.
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
+ serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
+ if err != nil {
+ return types.SwarmUnlockKeyResponse{}, err
+ }
+
+ var response types.SwarmUnlockKeyResponse
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
new file mode 100644
index 000000000..9e65e1cca
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInit initializes the swarm.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+ serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+ if err != nil {
+ return "", err
+ }
+
+ var response string
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
new file mode 100644
index 000000000..77e72f846
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmInspect inspects the swarm.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+ serverResp, err := cli.get(ctx, "/swarm", nil, nil)
+ if err != nil {
+ return swarm.Swarm{}, err
+ }
+
+ var response swarm.Swarm
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
new file mode 100644
index 000000000..19e5192b9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_join.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmJoin joins the swarm.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
new file mode 100644
index 000000000..3a205cf3b
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
new file mode 100644
index 000000000..9ee441fed
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_unlock.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUnlock unlocks a locked swarm.
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
+ serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
+ ensureReaderClosed(serverResp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
new file mode 100644
index 000000000..7245fd4e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/swarm_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
+ query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
+ query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
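For illustration, a minimal sketch of how these swarm calls compose: SwarmInspect supplies the version and spec that SwarmUpdate requires, and the UpdateFlags fields map onto the rotate* query parameters above. The env-based client constructor and the panic-style error handling are assumptions for brevity, not part of this diff.

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/swarm"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient() // assumed constructor from this client package
        if err != nil {
            panic(err)
        }
        ctx := context.Background()

        // SwarmUpdate needs the current version and spec; SwarmInspect provides both.
        sw, err := cli.SwarmInspect(ctx)
        if err != nil {
            panic(err)
        }

        // Rotate only the worker join token; the spec itself is unchanged.
        flags := swarm.UpdateFlags{RotateWorkerToken: true}
        if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
            panic(err)
        }
        fmt.Println("worker join token rotated")
    }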
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
new file mode 100644
index 000000000..bc8058fc3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Task{}, nil, taskNotFoundError{taskID}
+ }
+ return swarm.Task{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
new file mode 100644
index 000000000..66324da95
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/tasks", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var tasks []swarm.Task
+ err = json.NewDecoder(resp.body).Decode(&tasks)
+ ensureReaderClosed(resp)
+ return tasks, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
new file mode 100644
index 000000000..2ed19543a
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// TaskLogs returns the logs generated by a task in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
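A hedged usage sketch: because TaskLogs hands back the raw response body, closing the stream is the caller's job. The task ID and client construction below are placeholders.

    package main

    import (
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient() // assumed constructor
        if err != nil {
            panic(err)
        }

        // Follow the last 100 log lines of a task; "my-task-id" is made up.
        rc, err := cli.TaskLogs(context.Background(), "my-task-id", types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Follow:     true,
            Tail:       "100",
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close() // the stream is the caller's responsibility

        io.Copy(os.Stdout, rc)
    }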
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
new file mode 100644
index 000000000..401ab15d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/transport.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "crypto/tls"
+ "net/http"
+)
+
+// transportFunc allows us to inject a mock transport for testing. We define it
+// here so we can detect the tlsconfig and return nil for only this type.
+type transportFunc func(*http.Request) (*http.Response, error)
+
+func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return tf(req)
+}
+
+// resolveTLSConfig attempts to resolve the TLS configuration from the
+// RoundTripper.
+func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
+ switch tr := transport.(type) {
+ case *http.Transport:
+ return tr.TLSClientConfig
+ default:
+ return nil
+ }
+}
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
new file mode 100644
index 000000000..f3d8877df
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/utils.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "net/url"
+ "regexp"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// getDockerOS returns the operating system based on the server header from the daemon.
+func getDockerOS(serverHeader string) string {
+ var osType string
+ matches := headerRegexp.FindStringSubmatch(serverHeader)
+ if len(matches) > 0 {
+ osType = matches[1]
+ }
+ return osType
+}
+
+// getFiltersQuery returns a url query with "filters" query term, based on the
+// filters provided.
+func getFiltersQuery(f filters.Args) (url.Values, error) {
+ query := url.Values{}
+ if f.Len() > 0 {
+ filterJSON, err := filters.ToParam(f)
+ if err != nil {
+ return query, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ return query, nil
+}
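headerRegexp only matches Server headers of the form `Docker/<version> (<os>)`. A test-style sketch of the behavior, written in-package since getDockerOS is unexported; the header values are made up:

    package client

    import "testing"

    func TestGetDockerOS(t *testing.T) {
        // The parenthesized token of a daemon Server header is the OS type.
        if got := getDockerOS("Docker/17.06.0-ce (linux)"); got != "linux" {
            t.Fatalf("expected linux, got %q", got)
        }
        // Headers that don't match the pattern yield the empty string.
        if got := getDockerOS("nginx/1.13"); got != "" {
            t.Fatalf("expected empty string, got %q", got)
        }
    }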
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
new file mode 100644
index 000000000..933ceb4a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/version.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ServerVersion returns information about the Docker client and server host.
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
+ resp, err := cli.get(ctx, "/version", nil, nil)
+ if err != nil {
+ return types.Version{}, err
+ }
+
+ var server types.Version
+ err = json.NewDecoder(resp.body).Decode(&server)
+ ensureReaderClosed(resp)
+ return server, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
new file mode 100644
index 000000000..9620c87cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_create.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// VolumeCreate creates a volume in the docker host.
+func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) {
+ var volume types.Volume
+ resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
+ if err != nil {
+ return volume, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&volume)
+ ensureReaderClosed(resp)
+ return volume, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
new file mode 100644
index 000000000..3860e9b22
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// VolumeInspect returns the information about a specific volume in the docker host.
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
+ volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
+ return volume, err
+}
+
+// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation.
+func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+ var volume types.Volume
+ resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return volume, nil, volumeNotFoundError{volumeID}
+ }
+ return volume, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return volume, nil, err
+ }
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&volume)
+ return volume, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
new file mode 100644
index 000000000..32247ce11
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// VolumeList returns the volumes configured in the docker host.
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) {
+ var volumes volumetypes.VolumesListOKBody
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return volumes, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/volumes", query, nil)
+ if err != nil {
+ return volumes, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&volumes)
+ ensureReaderClosed(resp)
+ return volumes, err
+}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
new file mode 100644
index 000000000..2e7fea774
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// VolumesPrune requests the daemon to delete unused data.
+func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
+ var report types.VolumesPruneReport
+
+ if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
+ }
+
+ return report, nil
+}
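A short sketch of calling VolumesPrune with a label filter; against a daemon older than API 1.25, the NewVersionError guard above rejects the call before any request is made. The client construction and the label are assumptions:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient() // assumed constructor
        if err != nil {
            panic(err)
        }

        // Prune only volumes carrying this (hypothetical) label.
        f := filters.NewArgs()
        f.Add("label", "keep=false")

        report, err := cli.VolumesPrune(context.Background(), f)
        if err != nil {
            panic(err)
        }
        fmt.Println(report.VolumesDeleted, report.SpaceReclaimed)
    }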
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
new file mode 100644
index 000000000..6c26575b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// VolumeRemove removes a volume from the docker host.
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+ query := url.Values{}
+ if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
+ if force {
+ query.Set("force", "1")
+ }
+ }
+ resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md
new file mode 100644
index 000000000..c4b78a8ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/README.md
@@ -0,0 +1,11 @@
+pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
+
+Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
+If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
+Docker organization, to facilitate re-use by other projects. However, that is not the priority.
+
+The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
+Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
+
+Because utility packages are small and neatly separated from the rest of the codebase, they are a good
+place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
new file mode 100644
index 000000000..012fe52a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
@@ -0,0 +1,23 @@
+// +build linux
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/idtools"
+)
+
+// GetStatic returns the home directory for the current user without calling
+// os/user.Current(). This is useful for statically linked binaries on
+// glibc-based systems, because a call to os/user.Current() in a static binary
+// leads to a segfault due to a glibc issue that won't be fixed in the short term.
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
+func GetStatic() (string, error) {
+ uid := os.Getuid()
+ usr, err := idtools.LookupUID(uid)
+ if err != nil {
+ return "", err
+ }
+ return usr.Home, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
new file mode 100644
index 000000000..6b96b856f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package homedir
+
+import (
+ "errors"
+)
+
+// GetStatic is not needed for non-linux systems.
+// (Precisely, it is needed only for glibc-based linux systems.)
+func GetStatic() (string, error) {
+ return "", errors.New("homedir.GetStatic() is not supported on this system")
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
new file mode 100644
index 000000000..f2a20ea8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package homedir
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that is shortcut to user's home directory
+// in the native shell of the platform running on.
+func GetShortcutString() string {
+ return "~"
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
new file mode 100644
index 000000000..fafdb2bbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
@@ -0,0 +1,24 @@
+package homedir
+
+import (
+ "os"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ return "USERPROFILE"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ return os.Getenv(Key())
+}
+
+// GetShortcutString returns the string that is shortcut to user's home directory
+// in the native shell of the platform running on.
+func GetShortcutString() string {
+ return "%USERPROFILE%" // be careful while using in format functions
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 000000000..68a072db2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,279 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+// Deprecated: Use MkdirAllAndChown
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+// Deprecated: Use MkdirAndChown with an IDPair
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// MkdirAllAndChown creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
+ return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+ UID int
+ GID int
+}
+
+// IDMappings contains the mappings of UIDs and GIDs
+type IDMappings struct {
+ uids []IDMap
+ gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return &IDMappings{
+ uids: createIDMap(subuidRanges),
+ gids: createIDMap(subgidRanges),
+ }, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+ return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+ uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+ return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = toHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = toHost(pair.GID, i.gids)
+ }
+ return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+ uid, err := toContainer(pair.UID, i.uids)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toContainer(pair.GID, i.gids)
+ return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+ return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs returns the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+ return i.uids
+}
+
+// GIDs returns the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+ return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
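To make the mapping arithmetic concrete, a sketch using the exported wrappers (NewIDMappingsFromMaps is the deprecated shim defined above): with a single range mapping container IDs 0-65535 onto host IDs starting at 100000, container uid 1000 lands on host uid 101000.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/idtools"
    )

    func main() {
        // One remapped range: container IDs 0-65535 -> host IDs 100000-165535.
        maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
        m := idtools.NewIDMappingsFromMaps(maps, maps)

        // Container root maps to the start of the host range.
        fmt.Println(m.RootPair()) // {100000 100000}

        // toHost offsets within the range: 100000 + 1000 = 101000.
        host, err := m.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
        fmt.Println(host, err) // {101000 101000} <nil>
    }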
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 000000000..8701bb7fa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,204 @@
+// +build !windows
+
+package idtools
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+var (
+ entOnce sync.Once
+ getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ // short-circuit--we were called with an existing directory and chown was requested
+ return os.Chown(path, ownerUID, ownerGID)
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, pair IDPair) bool {
+ statInfo, err := system.Stat(path)
+ if err != nil {
+ return false
+ }
+ fileMode := os.FileMode(statInfo.Mode())
+ permBits := fileMode.Perm()
+ return accessible(statInfo.UID() == uint32(pair.UID),
+ statInfo.GID() == uint32(pair.GID), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+ if isOwner && (perms&0100 == 0100) {
+ return true
+ }
+ if isGroup && (perms&0010 == 0010) {
+ return true
+ }
+ if perms&0001 == 0001 {
+ return true
+ }
+ return false
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(username string) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUser(username)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+ if err != nil {
+ return user.User{}, err
+ }
+ return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUid(uid)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+}
+
+func getentUser(args string) (user.User, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.User{}, err
+ }
+ users, err := user.ParsePasswd(reader)
+ if err != nil {
+ return user.User{}, err
+ }
+ if len(users) == 0 {
+ return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+ }
+ return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGroup(groupname)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGid(gid)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+ reader, err := callGetent(args)
+ if err != nil {
+ return user.Group{}, err
+ }
+ groups, err := user.ParseGroup(reader)
+ if err != nil {
+ return user.Group{}, err
+ }
+ if len(groups) == 0 {
+ return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+ }
+ return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+ entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+ // if no `getent` command on host, can't do anything else
+ if getentCmd == "" {
+ return nil, fmt.Errorf("")
+ }
+ out, err := execCmd(getentCmd, args)
+ if err != nil {
+ exitCode, errC := system.GetExitCode(err)
+ if errC != nil {
+ return nil, err
+ }
+ switch exitCode {
+ case 1:
+ return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+ case 2:
+ terms := strings.Split(args, " ")
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+ case 3:
+ return nil, fmt.Errorf("getent database doesn't support enumeration")
+ default:
+ return nil, err
+ }
+
+ }
+ return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 000000000..45d2878e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept, so this is
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+// Windows does not require/support this function, so always return true
+func CanAccess(path string, pair IDPair) bool {
+ return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 000000000..9da7975e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,164 @@
+package idtools
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
+// useradd -r -s /bin/false <username>
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
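A test-style sketch (in-package, since these helpers are unexported) of the range-selection logic: findNextRangeStart begins at defaultRangeStart (100000) and, whenever the candidate 65536-wide range would overlap an existing one, jumps past that range's end.

    package idtools

    import "testing"

    func TestFindNextRangeStart(t *testing.T) {
        existing := ranges{
            {Start: 100000, Length: 65536}, // covers 100000-165535
            {Start: 165536, Length: 65536}, // covers 165536-231071
        }
        // 100000 and 165536 both overlap existing ranges, so the
        // next free start is 231072.
        start, err := findNextRangeStart(existing)
        if err != nil || start != 231072 {
            t.Fatalf("expected 231072, got %d (%v)", start, err)
        }
    }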
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 000000000..d98b354cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
new file mode 100644
index 000000000..9703ecbd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+ // only return no error if the final resolved binary basename
+ // matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 000000000..3d737b3e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 000000000..72a04f349
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe, initialized with a single fixed
+// buffer of capacity minCap (64). Further buffers are allocated on demand as
+// data is written, doubling in capacity up to maxCap.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+ bp.buf = append(bp.buf, getBuffer(64))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data can be read at most once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ err := bp.closeErr
+ bp.mu.Unlock()
+ return 0, err
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
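A minimal sketch of the one-shot read semantics: a writer goroutine fills the pipe and closes it, and the reader drains it exactly once, seeing io.EOF after Close.

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        bp := ioutils.NewBytesPipe()

        go func() {
            bp.Write([]byte("hello, "))
            bp.Write([]byte("pipe"))
            bp.Close() // readers see io.EOF once the buffer drains
        }()

        // Each byte can be read at most once.
        data, err := ioutil.ReadAll(bp)
        fmt.Printf("%q %v\n", data, err) // "hello, pipe" <nil>
    }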
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 000000000..a56c46265
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,162 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ perm: perm,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ f.(*atomicFileWriter).writeErr = err
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil || w.writeErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
+
+// AtomicWriteSet is used to atomically write a set
+// of files and ensure they are visible at the same time.
+// Must be committed to a new directory.
+type AtomicWriteSet struct {
+ root string
+}
+
+// NewAtomicWriteSet creates a new atomic write set to
+// atomically create a set of files. The given directory
+// is used as the base directory for storing files before
+// commit. If no temporary directory is given the system
+// default is used.
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
+ td, err := ioutil.TempDir(tmpDir, "write-set-")
+ if err != nil {
+ return nil, err
+ }
+
+ return &AtomicWriteSet{
+ root: td,
+ }, nil
+}
+
+// WriteFile writes a file to the set, guaranteeing the file
+// has been synced.
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type syncFileCloser struct {
+ *os.File
+}
+
+func (w syncFileCloser) Close() error {
+ err := w.File.Sync()
+ if err1 := w.File.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// FileWriter opens a file writer inside the set. The file
+// should be synced and closed before calling commit.
+func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return syncFileCloser{f}, nil
+}
+
+// Cancel cancels the set and removes all temporary data
+// created in the set.
+func (ws *AtomicWriteSet) Cancel() error {
+ return os.RemoveAll(ws.root)
+}
+
+// Commit moves all created files to the target directory. The
+// target directory must not exist and the parent of the target
+// directory must exist.
+func (ws *AtomicWriteSet) Commit(target string) error {
+ return os.Rename(ws.root, target)
+}
+
+// String returns the location the set is writing to.
+func (ws *AtomicWriteSet) String() string {
+ return ws.root
+}
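A sketch of AtomicWriteFile in use: the data goes to a dot-prefixed temp file in the destination's directory and is only renamed over the target after a successful Sync and Close, so concurrent readers never observe a half-written file. The file name and contents here are made up.

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        // Readers see either the old config.json or the complete new one;
        // never a torn write.
        if err := ioutils.AtomicWriteFile("config.json", []byte(`{"debug":true}`), 0600); err != nil {
            panic(err)
        }
        data, _ := ioutil.ReadFile("config.json")
        fmt.Println(string(data))
    }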
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 000000000..63f3c07f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function; the function runs once,
+// either when the reader reaches end of file or when it is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
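A hedged sketch of NewCancelReadCloser: wrapping a reader that never delivers data with a deadline context makes ReadAll give up with the context's error instead of blocking forever. The os.Pipe read end stands in for a stalled network stream.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "time"

        "github.com/docker/docker/pkg/ioutils"
        "golang.org/x/net/context"
    )

    func main() {
        // The read end blocks forever: the write end stays open but unused.
        pr, pw, err := os.Pipe()
        if err != nil {
            panic(err)
        }
        defer pw.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()

        rc := ioutils.NewCancelReadCloser(ctx, pr)
        defer rc.Close()

        // Reads fail with ctx.Err() once the deadline passes.
        _, err = ioutil.ReadAll(rc)
        fmt.Println(err) // context deadline exceeded
    }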
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 000000000..1539ad21b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 000000000..c258e5fdd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 000000000..52a4901ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed at least once.
+// It returns true if so, and false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+ // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+ // be used to detect whether or a response code has been issued or not.
+ // Another hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
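
A minimal sketch of how NewWriteFlusher is typically used: wrapping an http.ResponseWriter so each progress line reaches the client immediately. The handler and addresses are hypothetical; net/http's ResponseWriter provides the Flush method that the constructor probes for, so the NopFlusher fallback is not taken.

    package main

    import (
        "net/http"

        "github.com/docker/docker/pkg/ioutils"
    )

    // streamHandler streams progress lines; every Write is also a Flush.
    func streamHandler(w http.ResponseWriter, r *http.Request) {
        wf := ioutils.NewWriteFlusher(w)
        defer wf.Close() // subsequent writes fail with io.EOF

        wf.Write([]byte("step 1/2 done\n"))
        wf.Write([]byte("step 2/2 done\n"))
    }

    func main() {
        http.HandleFunc("/progress", streamHandler)
        http.ListenAndServe(":8080", nil)
    }
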
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 000000000..ccc7f9c23
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the byte count of a write is masked by the
+// caller (e.g., json.Encoder.Encode()).
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
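
A short sketch of WriteCounter recovering the byte count that json.Encoder.Encode masks, per the comment above; the destination writer here is simply os.Stdout.

    package main

    import (
        "encoding/json"
        "fmt"
        "os"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        // Encode reports only an error, so the byte count is read back
        // from the wrapping WriteCounter afterwards.
        wc := ioutils.NewWriteCounter(os.Stdout)
        if err := json.NewEncoder(wc).Encode(map[string]string{"status": "ok"}); err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Fprintf(os.Stderr, "wrote %d bytes\n", wc.Count)
    }
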
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 000000000..9b15bfff4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling
+// long paths in Windows, which are expected to be prepended with `\\?\` and
+// followed by either a drive letter, a UNC server\share, or a volume identifier.
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
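
A sketch of AddPrefix behavior on the three path shapes the package comment mentions; the paths are hypothetical.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/longpath"
    )

    func main() {
        fmt.Println(longpath.AddPrefix(`C:\Temp\docker`)) // \\?\C:\Temp\docker
        fmt.Println(longpath.AddPrefix(`\\server\share`)) // \\?\UNC\server\share
        fmt.Println(longpath.AddPrefix(`\\?\C:\Temp`))    // already prefixed; unchanged
    }
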
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 000000000..607dbed43
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+ "fmt"
+ "strings"
+)
+
+var flags = map[string]struct {
+ clear bool
+ flag int
+}{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+ "": true,
+ "size": true,
+ "mode": true,
+ "uid": true,
+ "gid": true,
+ "nr_inodes": true,
+ "nr_blocks": true,
+ "mpol": true,
+}
+
+var propagationFlags = map[string]bool{
+ "bind": true,
+ "rbind": true,
+ "unbindable": true,
+ "runbindable": true,
+ "private": true,
+ "rprivate": true,
+ "shared": true,
+ "rshared": true,
+ "slave": true,
+ "rslave": true,
+}
+
+// MergeTmpfsOptions merges mount options, making sure there are no duplicates.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+ // We use collision maps to remove duplicates.
+ // For flags, the key is the flag value (propagation flags share the key -1).
+ // For data=value options, the key is the data name.
+ flagCollisions := map[int]bool{}
+ dataCollisions := map[string]bool{}
+
+ var newOptions []string
+ // We process in reverse order
+ for i := len(options) - 1; i >= 0; i-- {
+ option := options[i]
+ if option == "defaults" {
+ continue
+ }
+ if f, ok := flags[option]; ok && f.flag != 0 {
+ // There is only one propagation mode
+ key := f.flag
+ if propagationFlags[option] {
+ key = -1
+ }
+ // Check to see if there is collision for flag
+ if !flagCollisions[key] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ flagCollisions[key] = true
+ }
+ continue
+ }
+ opt := strings.SplitN(option, "=", 2)
+ if len(opt) != 2 || !validFlags[opt[0]] {
+ return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ if !dataCollisions[opt[0]] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true
+ }
+ }
+
+ return newOptions, nil
+}
+
+// parseOptions parses fstab-style mount options into mount() flags
+// and device-specific data.
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parses fstab-style mount options into flags and data.
+func ParseTmpfsOptions(options string) (int, string, error) {
+ flags, data := parseOptions(options)
+ for _, o := range strings.Split(data, ",") {
+ opt := strings.SplitN(o, "=", 2)
+ if !validFlags[opt[0]] {
+ return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ }
+ return flags, data, nil
+}
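
A sketch of the two exported entry points, assuming Linux flag values. Note that "rw" and "ro" share the RDONLY collision key, so MergeTmpfsOptions keeps whichever appears last in the input.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        // Reverse processing keeps the later "ro" and the later "size=128m".
        merged, err := mount.MergeTmpfsOptions([]string{"rw", "size=64m", "ro", "size=128m"})
        if err != nil {
            panic(err)
        }
        fmt.Println(merged) // [ro size=128m]

        flags, data, err := mount.ParseTmpfsOptions("noexec,nosuid,size=128m")
        if err != nil {
            panic(err)
        }
        fmt.Printf("flags=%#x data=%q\n", flags, data) // flags is MS_NOEXEC|MS_NOSUID
    }
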
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..5f76f331b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 000000000..0425d0dd6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = unix.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: create, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = unix.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = unix.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = unix.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = unix.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = unix.MS_BIND | unix.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = unix.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = unix.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = unix.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = unix.MS_SLAVE | unix.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = unix.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = unix.MS_SHARED | unix.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = unix.MS_RELATIME
+
+ // STRICTATIME allows to explicitly request full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = unix.MS_STRICTATIME
+
+ mntDetach = unix.MNT_DETACH
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 000000000..9ed741e3f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+ mntDetach = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 000000000..c9fdfd694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,86 @@
+package mount
+
+import (
+ "sort"
+ "strings"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* if the target path is not already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+ mounts, err := GetMounts()
+ if err != nil {
+ return err
+ }
+
+ // Make the deepest mount be first
+ sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+ for i, m := range mounts {
+ if !strings.HasPrefix(m.Mountpoint, target) {
+ continue
+ }
+ if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+ if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+ return err
+ }
+ // Ignore errors for submounts and continue trying to unmount others
+ // The final unmount should fail if there are any submounts remaining
+ }
+ }
+ return nil
+}
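
A sketch of the Mount/Mounted/Unmount flow, assuming Linux, root privileges, and a hypothetical target directory that already exists.

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        target := "/mnt/scratch" // hypothetical; must already exist

        // Mount is a no-op if the target is already mounted (unless remounting).
        if err := mount.Mount("tmpfs", target, "tmpfs", "size=64m,noexec"); err != nil {
            log.Fatal(err)
        }
        defer mount.Unmount(target)

        mounted, err := mount.Mounted(target)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("%s mounted: %v", target, mounted)
    }
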
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 000000000..814896cc9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 000000000..39c36d472
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,57 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // ptypes is the set of propagation types.
+ ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+ // pflags is the full set of valid flags for a change-propagation call.
+ pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+ // broflags is the combination of bind and read-only flags.
+ broflags = unix.MS_BIND | unix.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+ switch {
+ // We treat device "" and "none" as a remount request to provide compatibility with
+ // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+ case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+ return true
+ default:
+ return false
+ }
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+ oflags := flags &^ ptypes
+ if !isremount(device, flags) || data != "" {
+ // Initial call applying all non-propagation flags for mount
+ // or remount with changed data
+ if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+ return err
+ }
+ }
+
+ if flags&ptypes != 0 {
+ // Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+ return err
+ }
+ }
+
+ if oflags&broflags == broflags {
+ // Remount the bind to apply read only.
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
+ }
+
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
new file mode 100644
index 000000000..c684aa81f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+ "unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ spec := C.CString(device)
+ dir := C.CString(target)
+ fstype := C.CString(mType)
+ _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+ C.free(unsafe.Pointer(spec))
+ C.free(unsafe.Pointer(dir))
+ C.free(unsafe.Pointer(fstype))
+ return err
+}
+
+func unmount(target string, flag int) error {
+ err := unix.Unmount(target, flag)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 000000000..a2a3bb457
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 000000000..ff4cc1d86
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+ return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+ return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+ by[i], by[j] = by[j], by[i]
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 000000000..4f32edcd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// parseMountTable returns the mounted filesystems as reported by getmntinfo(3);
+// FreeBSD has no /proc/self/mountinfo to parse.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 000000000..be69fee1d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
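
A sketch that lists the parsed mountinfo fields through the exported GetMounts; Linux is assumed, where this reads /proc/self/mountinfo.

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        infos, err := mount.GetMounts()
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range infos {
            fmt.Printf("%-30s %-10s %s\n", m.Mountpoint, m.Fstype, m.Source)
        }
    }
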
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 000000000..ad9ab57f8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+ mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ if mnttab == nil {
+ return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+ }
+
+ var out []*Info
+ var mp C.struct_mnttab
+
+ ret := C.getmntent(mnttab, &mp)
+ for ret == 0 {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+ mountinfo.Source = C.GoString(mp.mnt_special)
+ mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+ mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+ out = append(out, &mountinfo)
+ ret = C.getmntent(mnttab, &mp)
+ }
+
+ C.fclose(mnttab)
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 000000000..7fbcf1921
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 000000000..dab8a37ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+ // Do NOT return an error!
+ return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 000000000..8ceec84bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ if _, err = Mounted(mountPoint); err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
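
A sketch of MakeRShared, assuming Linux and root; the path is hypothetical. ensureMountedAs bind-mounts the directory onto itself if it is not yet a mount point, then remounts with the requested propagation option.

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        // Make a daemon root propagate mounts to containers (rshared).
        if err := mount.MakeRShared("/var/lib/mydaemon"); err != nil {
            log.Fatal(err)
        }
    }
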
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
new file mode 100644
index 000000000..09f6b03cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
@@ -0,0 +1,58 @@
+// +build solaris
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ // TODO: Solaris does not support bind mounts.
+ // Evaluate lofs and the relevant mount flags to support this.
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 000000000..6a111a3ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,137 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const buffer32K = 32 * 1024
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+ buffer32KPool = newBufferPoolWithSize(buffer32K)
+)
+
+// BufioReaderPool is a pool of bufio.Readers built on sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+ pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+ return &bufferPool{
+ pool: sync.Pool{
+ New: func() interface{} { return make([]byte, size) },
+ },
+ }
+}
+
+func (bp *bufferPool) Get() []byte {
+ return bp.pool.Get().([]byte)
+}
+
+func (bp *bufferPool) Put(b []byte) {
+ bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := buffer32KPool.Get()
+ written, err = io.CopyBuffer(dst, src, buf)
+ buffer32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a pool of bufio.Writers built on sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
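
A sketch of two common uses: pools.Copy for an allocation-free copy, and the 32K reader pool with its read-closer wrapper so Close returns the buffer. The input path is hypothetical; note the wrapper does not close the file, so that is done separately.

    package main

    import (
        "io/ioutil"
        "log"
        "os"

        "github.com/docker/docker/pkg/pools"
    )

    func main() {
        f, err := os.Open("/etc/hosts") // hypothetical input
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Buffer reads through a pooled 32K bufio.Reader; the wrapper's Close
        // puts the buffer back into the pool.
        buf := pools.BufioReader32KPool.Get(f)
        rc := pools.BufioReader32KPool.NewReadCloserWrapper(buf, buf)
        defer rc.Close()

        // pools.Copy avoids the per-call buffer allocation inside io.Copy.
        n, err := pools.Copy(ioutil.Discard, rc)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("copied %d bytes", n)
    }
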
diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md
new file mode 100644
index 000000000..2b237a594
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go
new file mode 100644
index 000000000..68bb77cf5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal.go
@@ -0,0 +1,54 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := []os.Signal{}
+ for _, s := range SignalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return syscall.Signal(s), nil
+ }
+ signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ if !ok {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return signal, nil
+}
+
+// ValidSignalForPlatform returns true if a signal is valid on the platform
+func ValidSignalForPlatform(sig syscall.Signal) bool {
+ for _, v := range SignalMap {
+ if v == sig {
+ return true
+ }
+ }
+ return false
+}
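
A sketch of ParseSignal and CatchAll on a Unix platform: the "SIG" prefix is optional, lookups are case-insensitive, and numeric strings map directly to syscall.Signal values.

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/signal"
    )

    func main() {
        for _, s := range []string{"TERM", "sigusr1", "9"} {
            sig, err := signal.ParseSignal(s)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                continue
            }
            fmt.Printf("%-8s -> %d\n", s, sig)
        }

        // Relay every signal in SignalMap to a channel until StopCatch is called.
        sigc := make(chan os.Signal, 128)
        signal.CatchAll(sigc)
        defer signal.StopCatch(sigc)
        fmt.Println("received:", <-sigc)
    }
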
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
new file mode 100644
index 000000000..946de87e9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Darwin signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUG": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 000000000..6b9569bb7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUF": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "THR": syscall.SIGTHR,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 000000000..3594796ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,82 @@
+package signal
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": unix.SIGABRT,
+ "ALRM": unix.SIGALRM,
+ "BUS": unix.SIGBUS,
+ "CHLD": unix.SIGCHLD,
+ "CLD": unix.SIGCLD,
+ "CONT": unix.SIGCONT,
+ "FPE": unix.SIGFPE,
+ "HUP": unix.SIGHUP,
+ "ILL": unix.SIGILL,
+ "INT": unix.SIGINT,
+ "IO": unix.SIGIO,
+ "IOT": unix.SIGIOT,
+ "KILL": unix.SIGKILL,
+ "PIPE": unix.SIGPIPE,
+ "POLL": unix.SIGPOLL,
+ "PROF": unix.SIGPROF,
+ "PWR": unix.SIGPWR,
+ "QUIT": unix.SIGQUIT,
+ "SEGV": unix.SIGSEGV,
+ "STKFLT": unix.SIGSTKFLT,
+ "STOP": unix.SIGSTOP,
+ "SYS": unix.SIGSYS,
+ "TERM": unix.SIGTERM,
+ "TRAP": unix.SIGTRAP,
+ "TSTP": unix.SIGTSTP,
+ "TTIN": unix.SIGTTIN,
+ "TTOU": unix.SIGTTOU,
+ "UNUSED": unix.SIGUNUSED,
+ "URG": unix.SIGURG,
+ "USR1": unix.SIGUSR1,
+ "USR2": unix.SIGUSR2,
+ "VTALRM": unix.SIGVTALRM,
+ "WINCH": unix.SIGWINCH,
+ "XCPU": unix.SIGXCPU,
+ "XFSZ": unix.SIGXFSZ,
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
new file mode 100644
index 000000000..89576b9e3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Solaris signals.
+// SIGINFO and SIGTHR not defined for Solaris
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUF": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
new file mode 100644
index 000000000..5d058fd56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in cli/command.
+
+const (
+ // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
+ SIGCHLD = syscall.SIGCHLD
+ // SIGWINCH is a signal sent to a process when its controlling terminal changes its size
+ SIGWINCH = syscall.SIGWINCH
+ // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+ SIGPIPE = syscall.SIGPIPE
+ // DefaultStopSignal is the syscall signal used to stop a container in unix systems.
+ DefaultStopSignal = "SIGTERM"
+)
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
new file mode 100644
index 000000000..c592d37df
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!darwin,!freebsd,!windows,!solaris
+
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is an empty map of signals for unsupported platform.
+var SignalMap = map[string]syscall.Signal{}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
new file mode 100644
index 000000000..440f2700e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in cli/command (no windows equivalent, use
+// invalid signals so they don't get handled)
+const (
+ SIGCHLD = syscall.Signal(0xff)
+ SIGWINCH = syscall.Signal(0xff)
+ SIGPIPE = syscall.Signal(0xff)
+ // DefaultStopSignal is the syscall signal used to stop a container in windows systems.
+ DefaultStopSignal = "15"
+)
+
+// SignalMap is a map of "supported" signals. As per the comment in Go's
+// ztypes_windows.go: "More invented values for signals". Windows doesn't
+// really support signals in any way, shape, or form that Unix does.
+//
+// We have these so that docker kill can be used to gracefully (TERM) and
+// forcibly (KILL) terminate a container on Windows.
+var SignalMap = map[string]syscall.Signal{
+ "KILL": syscall.SIGKILL,
+ "TERM": syscall.SIGTERM,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go
new file mode 100644
index 000000000..2884dfee3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/trap.go
@@ -0,0 +1,104 @@
+package signal
+
+import (
+ "fmt"
+ "os"
+ gosignal "os/signal"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// Trap sets up a simplified signal "trap", appropriate for common
+// behavior expected from a vanilla unix command-line tool in general
+// (and the Docker engine in particular).
+//
+// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+// skipped and the process is terminated immediately (allows force quit of stuck daemon)
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
+// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while
+// the docker daemon is not restarted and also running under systemd.
+// Fixes https://github.com/docker/docker/issues/19728
+//
+func Trap(cleanup func(), logger interface {
+ Info(args ...interface{})
+}) {
+ c := make(chan os.Signal, 1)
+ // we will handle INT, TERM, QUIT, SIGPIPE here
+ signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE}
+ gosignal.Notify(c, signals...)
+ go func() {
+ interruptCount := uint32(0)
+ for sig := range c {
+ if sig == syscall.SIGPIPE {
+ continue
+ }
+
+ go func(sig os.Signal) {
+ logger.Info(fmt.Sprintf("Processing signal '%v'", sig))
+ switch sig {
+ case os.Interrupt, syscall.SIGTERM:
+ if atomic.LoadUint32(&interruptCount) < 3 {
+ // Initiate the cleanup only once
+ if atomic.AddUint32(&interruptCount, 1) == 1 {
+ // Call the provided cleanup handler
+ cleanup()
+ os.Exit(0)
+ } else {
+ return
+ }
+ } else {
+ // 3 SIGTERM/INT signals received; force exit without cleanup
+ logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+ }
+ case syscall.SIGQUIT:
+ DumpStacks("")
+ logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+ }
+ // For the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal number
+ os.Exit(128 + int(sig.(syscall.Signal)))
+ }(sig)
+ }
+ }()
+}
+
+const stacksLogNameTemplate = "goroutine-stacks-%s.log"
+
+// DumpStacks appends the runtime stack to a file in dir and returns the full
+// path to that file. If dir is empty, the stack is written to os.Stderr.
+func DumpStacks(dir string) (string, error) {
+ var (
+ buf []byte
+ stackSize int
+ )
+ bufferLen := 16384
+ for stackSize == len(buf) {
+ buf = make([]byte, bufferLen)
+ stackSize = runtime.Stack(buf, true)
+ bufferLen *= 2
+ }
+ buf = buf[:stackSize]
+ var f *os.File
+ if dir != "" {
+ path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
+ var err error
+ f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to open file to write the goroutine stacks")
+ }
+ defer f.Close()
+ defer f.Sync()
+ } else {
+ f = os.Stderr
+ }
+ if _, err := f.Write(buf); err != nil {
+ return "", errors.Wrap(err, "failed to write goroutine stacks")
+ }
+ return f.Name(), nil
+}
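
A sketch of wiring Trap into a daemon's main. logrus is used because its standard logger satisfies the minimal Info(args ...interface{}) interface Trap asks for, but any such logger works; the main loop below is a hypothetical stand-in.

    package main

    import (
        "github.com/docker/docker/pkg/signal"
        "github.com/sirupsen/logrus"
    )

    func main() {
        signal.Trap(func() {
            // Release resources here; runs once, on the first SIGINT/SIGTERM.
            logrus.Info("shutting down")
        }, logrus.StandardLogger())

        select {} // stand-in for the daemon's main loop
    }
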
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 000000000..37a5098fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 000000000..a0c7c42a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,99 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+ cryptorand "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const shortLen = 12
+
+var (
+ validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
+ validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+)
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ if len(id) > shortLen {
+ id = id[:shortLen]
+ }
+ return id
+}
+
+func generateID(r io.Reader) string {
+ b := make([]byte, 32)
+ for {
+ if _, err := io.ReadFull(r, b); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ id := hex.EncodeToString(b)
+ // if we try to parse the truncated form as an int and we don't have
+ // an error then the value is all numeric and causes issues when
+ // used as a hostname. ref #3869
+ if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+ continue
+ }
+ return id
+ }
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+ return generateID(cryptorand.Reader)
+}
+
+// GenerateNonCryptoID generates a unique ID without using cryptographically
+// secure sources of randomness, which helps conserve entropy.
+func GenerateNonCryptoID() string {
+ return generateID(readerFunc(rand.Read))
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return fmt.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+func init() {
+ // safely set the seed globally so we generate random ids. Tries to use a
+ // crypto seed before falling back to time.
+ var seed int64
+ if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+ // This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano()
+ } else {
+ seed = cryptoseed.Int64()
+ }
+
+ rand.Seed(seed)
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (fn readerFunc) Read(p []byte) (int, error) {
+ return fn(p)
+}
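
A short sketch of the ID helpers working together.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/stringid"
    )

    func main() {
        id := stringid.GenerateRandomID() // 64 hex chars; never all-numeric when truncated
        short := stringid.TruncateID(id)  // first 12 characters

        fmt.Println(id, short)
        fmt.Println(stringid.IsShortID(short))      // true
        fmt.Println(stringid.ValidateID(id) == nil) // true
    }
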
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md
new file mode 100644
index 000000000..b3e454573
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
new file mode 100644
index 000000000..8c4c39875
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
@@ -0,0 +1,99 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+ "bytes"
+ "math/rand"
+ "strings"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+ // pick uniformly from upper- and lowercase ASCII letters
+ letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letters[rand.Intn(len(letters))]
+ }
+ return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+ chars := "abcdefghijklmnopqrstuvwxyz" +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+ res := make([]byte, n)
+ for i := 0; i < n; i++ {
+ res[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, appending an ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended.
+func Ellipsis(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ if maxlen <= 3 {
+ return string(r[:maxlen])
+ }
+ return string(r[:maxlen-3]) + "..."
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ return string(r[:maxlen])
+}
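
Both Ellipsis and Truncate convert to []rune first, so multi-byte UTF-8 characters are never split mid-sequence. A hedged usage sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	s := "héllo wörld" // 11 runes, 13 bytes
	fmt.Println(stringutils.Ellipsis(s, 8)) // "héllo...": 5 runes kept, 3 spent on the ellipsis
	fmt.Println(stringutils.Truncate(s, 8)) // "héllo wö": a plain 8-rune cut
	fmt.Println(stringutils.Ellipsis(s, 3)) // "hél": at maxlen <= 3 no ellipsis is appended
}
```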
+
+// InSlice tests whether a string is contained in a slice of strings.
+// The comparison is case-insensitive.
+func InSlice(slice []string, s string) bool {
+ for _, ss := range slice {
+ if strings.ToLower(s) == strings.ToLower(ss) {
+ return true
+ }
+ }
+ return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+ buf.WriteString(word)
+ return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+ b := word[i]
+ if b == '\'' {
+ // Replace literal ' with a close ', a \', and an open '
+ buf.WriteString("'\\''")
+ } else {
+ buf.WriteByte(b)
+ }
+ }
+
+ buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled correctly when passed as arguments to a program via a shell.
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+ quote(arg, &buf)
+ }
+ return buf.String()
+}
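
The quoting scheme wraps anything containing shell metacharacters in single quotes and rewrites an embedded ' as '\'' (close quote, escaped quote, reopen), the standard POSIX trick. For example:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	args := []string{"echo", "hello world", "it's", "plain"}
	fmt.Println(stringutils.ShellQuoteArguments(args))
	// Output: echo 'hello world' 'it'\''s' plain
}
```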
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 000000000..b9fbf3c98
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2017 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
new file mode 100644
index 000000000..4c056c5ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md
new file mode 100644
index 000000000..8dba54fd0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope, which is an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 000000000..52fb9a691
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(filepath.FromSlash(path))
+ if err != nil {
+ return "", err
+ }
+ root, err = filepath.Abs(filepath.FromSlash(root))
+ if err != nil {
+ return "", err
+ }
+ return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one subsequently makes /foo/bar a symlink to /baz, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
+ }
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root" when we append paths to it, we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
+
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ continue
+ }
+
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if isDriveOrRoot(cleanP) {
+ // never Lstat "/" itself, or drive letters on Windows
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ if system.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+
+ // see note above on "fullP := ..." for why this is double-cleaned and
+ // what's happening here
+ return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
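
A sketch of the containment guarantee, assuming a throwaway directory created at runtime (paths are illustrative; os.MkdirTemp needs Go 1.16+):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root, err := os.MkdirTemp("", "scope")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// <root>/escape -> /etc: an absolute link pointing outside the scope.
	if err := os.Symlink("/etc", filepath.Join(root, "escape")); err != nil {
		panic(err)
	}

	// The absolute target is re-rooted under root rather than escaping to /etc.
	p, err := symlink.FollowSymlinkInScope(filepath.Join(root, "escape", "passwd"), root)
	fmt.Println(p, err) // <root>/etc/passwd <nil>
}
```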
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`.
+func EvalSymlinks(path string) (string, error) {
+ return evalSymlinks(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
new file mode 100644
index 000000000..22708273d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package symlink
+
+import (
+ "path/filepath"
+)
+
+func evalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
+
+func isDriveOrRoot(p string) bool {
+ return p == string(filepath.Separator)
+}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
new file mode 100644
index 000000000..31523ade9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
@@ -0,0 +1,169 @@
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/longpath"
+ "golang.org/x/sys/windows"
+)
+
+func toShort(path string) (string, error) {
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetShortPathName says we can reuse buffer
+ n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
+ return "", err
+ }
+ }
+ return windows.UTF16ToString(b), nil
+}
+
+func toLong(path string) (string, error) {
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetLongPathName says we can reuse buffer
+ n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ }
+ b = b[:n]
+ return windows.UTF16ToString(b), nil
+}
+
+func evalSymlinks(path string) (string, error) {
+ path, err := walkSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ p, err := toShort(path)
+ if err != nil {
+ return "", err
+ }
+ p, err = toLong(p)
+ if err != nil {
+ return "", err
+ }
+ // windows.GetLongPathName does not change the case of the drive letter,
+ // but the result of EvalSymlinks must be unique, so we have
+ // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
+ // Make drive letter upper case.
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
+ p = string(p[0]+'A'-'a') + p[1:]
+ } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
+ p = p[:3] + string(p[4]+'A'-'a') + p[5:]
+ }
+ return filepath.Clean(p), nil
+}
+
+const utf8RuneSelf = 0x80
+
+func walkSymlinks(path string) (string, error) {
+ const maxIter = 255
+ originalPath := path
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("EvalSymlinks: too many links in " + originalPath)
+ }
+
+ // A path beginning with `\\?\` represents the root, so automatically
+ // skip that part and begin processing the next segment.
+ if strings.HasPrefix(path, longpath.Prefix) {
+ b.WriteString(longpath.Prefix)
+ path = path[4:]
+ continue
+ }
+
+ // find next path component, p
+ var i = -1
+ for j, c := range path {
+ if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
+ i = j
+ break
+ }
+ }
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ if b.Len() == 0 {
+ // must be absolute path
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // If this is the first segment after the long path prefix, accept the
+ // current segment as a volume root or UNC share and move on to the next.
+ if b.String() == longpath.Prefix {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ fi, err := os.Lstat(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+ return filepath.Clean(b.String()), nil
+}
+
+func isDriveOrRoot(p string) bool {
+ if p == string(filepath.Separator) {
+ return true
+ }
+
+ length := len(p)
+ if length >= 2 {
+ if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 000000000..056d19954
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior
+ // default to Unix Epoch in this case, just in case
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
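
The clamping means a caller can pass a zero or far-future timestamp without tripping os.Chtimes' undefined behavior; out-of-range values collapse to the Unix epoch. A hedged sketch (os.CreateTemp needs Go 1.16+):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := os.CreateTemp("", "chtimes")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// The zero time.Time predates the Unix epoch, so it is clamped to
	// time.Unix(0, 0) before os.Chtimes runs.
	fmt.Println(system.Chtimes(f.Name(), time.Time{}, time.Now()))
}
```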
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 000000000..09d58bcbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 000000000..45428c141
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package system
+
+import (
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := windows.NsecToTimespec(ctime.UnixNano())
+ pathp, e := windows.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := windows.CreateFile(pathp,
+ windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+ windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer windows.Close(h)
+ c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+ return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 000000000..288318985
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture are not supported")
+)
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 000000000..192e36788
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,85 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
+func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) {
+ namep, _ := windows.UTF16PtrFromString(name)
+ var _p1 uint32
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) {
+ namep, _ := windows.UTF16PtrFromString(name)
+ var _p1 uint32
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle windows.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) {
+ r0, _, _ := proc.Call(uintptr(handle))
+ if r0 != 0 {
+ err = syscall.Errno(r0)
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive, preventing the GC from freeing it while it is still needed.
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go
new file mode 100644
index 000000000..60f0514b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "fmt"
+ "os/exec"
+ "syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError; otherwise it returns 0 and an error.
+func GetExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status
+// code if the error is of type exec.ExitError, 127 if an exit status cannot
+// be determined, and 0 if err is nil.
+func ProcessExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
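
A sketch of the intended use on a Unix-like system (assumes sh is on PATH): run a command, then map its failure to a numeric status, with 127 standing in when no status is recoverable.

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/docker/docker/pkg/system"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	fmt.Println(system.ProcessExitCode(err)) // 3: err is an *exec.ExitError

	err = exec.Command("/no/such/binary").Run()
	fmt.Println(system.ProcessExitCode(err)) // 127: not an *exec.ExitError
}
```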
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 000000000..102565f76
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,67 @@
+// +build !windows
+
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return MkdirAll(path, perm, sddl)
+}
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with the permissions specified by perm for all directories created.
+func MkdirAll(path string, perm os.FileMode, sddl string) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+// The functions below here are wrappers for the equivalents in the os and ioutils packages.
+// They are passthrough on Unix platforms, and only relevant on Windows.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return os.Create(name)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return os.Open(name)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+// TempFileSequential creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ return ioutil.TempFile(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..a61b53d0b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ winio "github.com/Microsoft/go-winio"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+ SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+ // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+ SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+ return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is largely copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = mkdirall(path[0:j-1], false, sddl)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+ if applyACL {
+ err = mkdirWithACL(path, sddl)
+ } else {
+ err = os.Mkdir(path, 0)
+ }
+
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+ sa := windows.SecurityAttributes{Length: 0}
+ sd, err := winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+ namep, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+
+ e := windows.CreateDirectory(namep, &sa)
+ if e != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: e}
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
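
On a Windows build the difference is visible directly: the stdlib rejects a rooted path without a drive letter while this wrapper accepts it (a hypothetical check, meaningful only when compiled for Windows):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/system"
)

func main() {
	fmt.Println(filepath.IsAbs(`\windows\system32`)) // false: no drive letter
	fmt.Println(system.IsAbs(`\windows\system32`))   // true: rooted paths count as absolute here
}
```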
+
+// The origin of the functions below here are the golang OS and windows packages,
+// slightly modified to only cope with files, not directories due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+ if name == "" {
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+ }
+ r, errf := windowsOpenFileSequential(name, flag, 0)
+ if errf == nil {
+ return r, nil
+ }
+ return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+ r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *windows.SecurityAttributes {
+ var sa windows.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
+ if len(path) == 0 {
+ return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return windows.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
+ case windows.O_RDONLY:
+ access = windows.GENERIC_READ
+ case windows.O_WRONLY:
+ access = windows.GENERIC_WRITE
+ case windows.O_RDWR:
+ access = windows.GENERIC_READ | windows.GENERIC_WRITE
+ }
+ if mode&windows.O_CREAT != 0 {
+ access |= windows.GENERIC_WRITE
+ }
+ if mode&windows.O_APPEND != 0 {
+ access &^= windows.GENERIC_WRITE
+ access |= windows.FILE_APPEND_DATA
+ }
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
+ var sa *windows.SecurityAttributes
+ if mode&windows.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
+ createmode = windows.CREATE_NEW
+ case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
+ createmode = windows.CREATE_ALWAYS
+ case mode&windows.O_CREAT == windows.O_CREAT:
+ createmode = windows.OPEN_ALWAYS
+ case mode&windows.O_TRUNC == windows.O_TRUNC:
+ createmode = windows.TRUNCATE_EXISTING
+ default:
+ createmode = windows.OPEN_EXISTING
+ }
+ // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+ return h, e
+}
+
+// Helpers for TempFileSequential
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
+// file access. Below is the original comment from golang:
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go
new file mode 100644
index 000000000..17935088d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+ // chtimes initialization
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
new file mode 100644
index 000000000..019c66441
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -0,0 +1,17 @@
+package system
+
+import "os"
+
+// LCOWSupported determines if Linux Containers on Windows are supported.
+// Note: This feature is in development (06/17) and enabled through an
+// environment variable. At a future time, it will be enabled based
+// on build number. @jhowardmsft
+var lcowSupported = false
+
+func init() {
+ // LCOW initialization
+ if os.Getenv("LCOW_SUPPORTED") != "" {
+ lcowSupported = true
+ }
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
new file mode 100644
index 000000000..cff33bb40
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
new file mode 100644
index 000000000..e54d01e69
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return lcowSupported
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
new file mode 100644
index 000000000..bd23c4d50
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 000000000..e51df0daf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromStatT(&fi)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 000000000..3b6e947e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 000000000..385f1d5e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
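
Because parseMemInfo takes an io.Reader, it can be exercised against a canned /proc/meminfo fragment. A hypothetical in-package test sketch (parseMemInfo is unexported):

```go
package system

import (
	"strings"
	"testing"
)

func TestParseMemInfoSketch(t *testing.T) {
	input := strings.NewReader(
		"MemTotal:       16384000 kB\n" +
			"MemFree:         8192000 kB\n" +
			"Garbage line that is skipped\n" + // fails the "kB" sanity check
			"SwapTotal:       2097152 kB\n" +
			"SwapFree:        2097152 kB\n")
	mi, err := parseMemInfo(input)
	if err != nil {
		t.Fatal(err)
	}
	if mi.MemTotal != 16384000*1024 { // values are converted from kB to bytes
		t.Fatalf("unexpected MemTotal: %d", mi.MemTotal)
	}
}
```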
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 000000000..925776e78
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, the same way prtconf does.
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+ return nil, fmt.Errorf("error getting system memory info: %v", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 000000000..3ce019dff
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 000000000..883944a4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 000000000..af79a6538
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
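
A worked instance of the bit layout: the Linux block device (major 8, minor 1), traditionally /dev/sda1, encodes to 0x801 (a sketch for a non-Windows build, since Mkdev panics on Windows):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// ((1 & 0xfff00) << 12) | ((8 & 0xfff) << 8) | (1 & 0xff)
	//        = 0x000        |       0x800        |    0x001   = 0x801
	fmt.Printf("%#x\n", system.Mkdev(8, 1)) // 0x801 (2049)
}
```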
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 000000000..2e863c021
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
new file mode 100644
index 000000000..f634a6be6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -0,0 +1,21 @@
+package system
+
+import "runtime"
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is the Unix-style list of directories in which to search
+// for executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(platform string) string {
+ if runtime.GOOS == "windows" {
+ if platform != runtime.GOOS && LCOWSupported() {
+ return defaultUnixPathEnv
+ }
+ // Deliberately empty on Windows containers on Windows as the default path will be set by
+ // the container. Docker has no context of what the default path should be.
+ return ""
+ }
+ return defaultUnixPathEnv
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 000000000..f3762e69d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on non-Windows platforms.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 000000000..aab891522
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed, and the path is translated to OS semantics (in other
+// words, / to \). We need the path in this syntax so that it can ultimately be
+// concatenated with a Windows long-path, which doesn't support drive letters.
+// Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
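A Windows-only sketch (editorial illustration, assuming the vendored import path) exercising the examples listed in the comment above:

// +build windows

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/system"
)

func main() {
    for _, p := range []string{`C:\`, `a`, `/a`, `C:`, `d:\`} {
        out, err := system.CheckSystemDriveAndRemoveDriveLetter(p)
        fmt.Printf("%-4s -> %q (err: %v)\n", p, out, err)
    }
    // C:\ -> "\", a -> "a", /a -> "\a"; C: and d:\ return errors.
}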
diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go
new file mode 100644
index 000000000..26c8b42c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if the process with the given pid is running.
+func IsProcessAlive(pid int) bool {
+ err := unix.Kill(pid, syscall.Signal(0))
+ if err == nil || err == unix.EPERM {
+ return true
+ }
+
+ return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+ unix.Kill(pid, unix.SIGKILL)
+}
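A brief sketch (not part of the vendored file; assumes the vendored import path) of how a caller might use these helpers:

package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/system"
)

func main() {
    pid := os.Getpid()
    fmt.Println(system.IsProcessAlive(pid)) // true: we can signal ourselves
    // system.KillProcess(pid) would send SIGKILL to the target; its error is ignored.
}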
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
new file mode 100644
index 000000000..101b569a5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir to
+// be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+ notExistErr := make(map[string]bool)
+
+ // track retries
+ exitOnErr := make(map[string]int)
+ maxRetry := 5
+
+ // Attempt to unmount anything beneath this dir first
+ mount.RecursiveUnmount(dir)
+
+ for {
+ err := os.RemoveAll(dir)
+ if err == nil {
+ return err
+ }
+
+ pe, ok := err.(*os.PathError)
+ if !ok {
+ return err
+ }
+
+ if os.IsNotExist(err) {
+ if notExistErr[pe.Path] {
+ return err
+ }
+ notExistErr[pe.Path] = true
+
+ // There is a race where a subdir can be removed after the parent
+ // dir entries have been read, so the path could come from an
+ // internal `os.Remove(subdir)` call.
+ // If the reported non-existent path is not the passed-in `dir`,
+ // just retry; otherwise return with no error.
+ if pe.Path == dir {
+ return nil
+ }
+ continue
+ }
+
+ if pe.Err != syscall.EBUSY {
+ return err
+ }
+
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ if e := mount.Unmount(pe.Path); e != nil {
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ return errors.Wrapf(e, "error while removing %s", dir)
+ }
+ }
+ }
+
+ if exitOnErr[pe.Path] == maxRetry {
+ return err
+ }
+ exitOnErr[pe.Path]++
+ time.Sleep(100 * time.Millisecond)
+ }
+}
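A usage sketch (editorial illustration, assuming the vendored import path); EnsureRemoveAll is the right call when a directory may still have mounts beneath it:

package main

import (
    "io/ioutil"
    "log"

    "github.com/docker/docker/pkg/system"
)

func main() {
    dir, err := ioutil.TempDir("", "layer-")
    if err != nil {
        log.Fatal(err)
    }
    // Unlike a plain os.RemoveAll, this unmounts anything under dir first
    // and retries EBUSY removals up to five times before giving up.
    if err := system.EnsureRemoveAll(dir); err != nil {
        log.Fatal(err)
    }
}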
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 000000000..66bf6e28e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,19 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.StatT type.
+// This is exposed on Linux because pkg/archive/changes uses it.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..b607dea94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
new file mode 100644
index 000000000..91c7d121c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns the file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns the user ID of the file's owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns the group ID of the file's owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns the file's device ID (if it's a special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns the file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns the file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// It returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
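A Unix-only usage sketch (editorial illustration, assuming the vendored import path and an existing path such as /etc/hosts):

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/system"
)

func main() {
    st, err := system.Stat("/etc/hosts")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("mode=%o uid=%d gid=%d size=%d\n",
        st.Mode(), st.UID(), st.GID(), st.Size())
}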
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 000000000..6c6397268
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,49 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc about a file.
+type StatT struct {
+ mode os.FileMode
+ size int64
+ mtim time.Time
+}
+
+// Size returns the file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns the file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return os.FileMode(s.mode)
+}
+
+// Mtim returns the file's last modification time.
+func (s StatT) Mtim() time.Time {
+ return time.Time(s.mtim)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// It returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+ return &StatT{
+ size: (*fi).Size(),
+ mode: (*fi).Mode(),
+ mtim: (*fi).ModTime()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 000000000..49dbdd378
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "golang.org/x/sys/unix"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return unix.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 000000000..23e9b207c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,122 @@
+package system
+
+import (
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+ procGetProductInfo = modkernel32.NewProc("GetProductInfo")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = windows.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// IsIoTCore returns true if the currently running image is based on
+// Windows 10 IoT Core.
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsIoTCore() bool {
+ var returnedProductType uint32
+ r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
+ if r1 == 0 {
+ logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
+ return false
+ }
+ const productIoTUAP = 0x0000007B
+ const productIoTUAPCommercial = 0x00000083
+ return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. It is a no-op on Windows.
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := windows.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := windows.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = string(windows.UTF16ToString((*v)[:]))
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
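A Windows-only sketch (editorial illustration, assuming the vendored import path) showing that CommandLineToArgv keeps a quoted segment as a single argument:

// +build windows

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/system"
)

func main() {
    args, err := system.CommandLineToArgv(`docker run -v "C:\some dir:/data" busybox`)
    if err != nil {
        log.Fatal(err)
    }
    for i, a := range args {
        fmt.Printf("argv[%d] = %s\n", i, a) // the quoted path stays one argument
    }
}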
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 000000000..5a10eda5a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return unix.Umask(newmask), nil
+}
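A minimal sketch of the usual save-and-restore pattern around Umask (editorial illustration; Unix-only, assuming the vendored import path):

package main

import (
    "log"

    "github.com/docker/docker/pkg/system"
)

func main() {
    // Tighten the mask for a critical section, then restore the old value.
    old, err := system.Umask(0077)
    if err != nil {
        log.Fatal(err) // ErrNotSupportedPlatform on Windows
    }
    defer system.Umask(old)
    // ... create files with restrictive permissions here ...
}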
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 000000000..13f1de176
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000..6a7752437
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 000000000..edc588a63
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..139714544
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..98b111be4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and a nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+ if errno == unix.ENODATA {
+ return nil, nil
+ }
+ if errno == unix.ERANGE {
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+ if errno != nil {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, attr, data, flags)
+}
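A Linux-only sketch of the get/set pair (editorial illustration; the path and attribute name are hypothetical, and the file must live on a filesystem with user xattr support):

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/system"
)

func main() {
    path, attr := "/tmp/example", "user.note" // hypothetical file and attribute
    if err := system.Lsetxattr(path, attr, []byte("hello"), 0); err != nil {
        log.Fatal(err)
    }
    val, err := system.Lgetxattr(path, attr)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s = %q\n", attr, val) // a nil slice means the xattr is unset
}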
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
new file mode 100644
index 000000000..f5262bccf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ASCII lists the supported ASCII key sequences.
+var ASCII = []string{
+ "ctrl-@",
+ "ctrl-a",
+ "ctrl-b",
+ "ctrl-c",
+ "ctrl-d",
+ "ctrl-e",
+ "ctrl-f",
+ "ctrl-g",
+ "ctrl-h",
+ "ctrl-i",
+ "ctrl-j",
+ "ctrl-k",
+ "ctrl-l",
+ "ctrl-m",
+ "ctrl-n",
+ "ctrl-o",
+ "ctrl-p",
+ "ctrl-q",
+ "ctrl-r",
+ "ctrl-s",
+ "ctrl-t",
+ "ctrl-u",
+ "ctrl-v",
+ "ctrl-w",
+ "ctrl-x",
+ "ctrl-y",
+ "ctrl-z",
+ "ctrl-[",
+ "ctrl-\\",
+ "ctrl-]",
+ "ctrl-^",
+ "ctrl-_",
+}
+
+// ToBytes converts a comma-separated string of key sequences to the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+ codes := []byte{}
+next:
+ for _, key := range strings.Split(keys, ",") {
+ if len(key) != 1 {
+ for code, ctrl := range ASCII {
+ if ctrl == key {
+ codes = append(codes, byte(code))
+ continue next
+ }
+ }
+ if key == "DEL" {
+ codes = append(codes, 127)
+ } else {
+ return nil, fmt.Errorf("Unknown character: '%s'", key)
+ }
+ } else {
+ codes = append(codes, byte(key[0]))
+ }
+ }
+ return codes, nil
+}
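A short sketch of ToBytes on the default docker detach sequence (editorial illustration, assuming the vendored import path):

package main

import (
    "fmt"
    "log"

    "github.com/docker/docker/pkg/term"
)

func main() {
    codes, err := term.ToBytes("ctrl-p,ctrl-q")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("% x\n", codes) // 10 11: indexes 16 and 17 in the ASCII slice
}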
diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go
new file mode 100644
index 000000000..e648eb812
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/proxy.go
@@ -0,0 +1,74 @@
+package term
+
+import (
+ "io"
+)
+
+// EscapeError is a special error returned by a TTY proxy reader's Read()
+// method when its detach escape sequence is read.
+type EscapeError struct{}
+
+func (EscapeError) Error() string {
+ return "read escape sequence"
+}
+
+// escapeProxy is used only for attaches with a TTY. It is used to proxy
+// stdin keypresses from the underlying reader and look for the passed in
+// escape key sequence to signal a detach.
+type escapeProxy struct {
+ escapeKeys []byte
+ escapeKeyPos int
+ r io.Reader
+}
+
+// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
+// and detects when the specified escape keys are read, in which case the Read
+// method will return an error of type EscapeError.
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
+ return &escapeProxy{
+ escapeKeys: escapeKeys,
+ r: r,
+ }
+}
+
+func (r *escapeProxy) Read(buf []byte) (int, error) {
+ nr, err := r.r.Read(buf)
+
+ preserve := func() {
+ // this preserves the original key presses in the passed in buffer
+ nr += r.escapeKeyPos
+ preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
+ preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
+ preserve = append(preserve, buf...)
+ r.escapeKeyPos = 0
+ copy(buf[0:nr], preserve)
+ }
+
+ if nr != 1 || err != nil {
+ if r.escapeKeyPos > 0 {
+ preserve()
+ }
+ return nr, err
+ }
+
+ if buf[0] != r.escapeKeys[r.escapeKeyPos] {
+ if r.escapeKeyPos > 0 {
+ preserve()
+ }
+ return nr, nil
+ }
+
+ if r.escapeKeyPos == len(r.escapeKeys)-1 {
+ return 0, EscapeError{}
+ }
+
+ // Looks like we've got an escape key, but we need to match again on the next
+ // read.
+ // Store the current escape key we found so we can look for the next one on
+ // the next read.
+ // Since this is an escape key, make sure we don't let the caller read it
+ // If later on we find that this is not the escape sequence, we'll add the
+ // keys back
+ r.escapeKeyPos++
+ return nr - r.escapeKeyPos, nil
+}
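A sketch wiring NewEscapeProxy to the ToBytes helper from ascii.go (editorial illustration, assuming the vendored import path; the forwarding step is left as a comment):

package main

import (
    "log"
    "os"

    "github.com/docker/docker/pkg/term"
)

func main() {
    keys, _ := term.ToBytes("ctrl-p,ctrl-q")
    r := term.NewEscapeProxy(os.Stdin, keys)
    buf := make([]byte, 1024)
    for {
        n, err := r.Read(buf)
        if _, ok := err.(term.EscapeError); ok {
            log.Println("detach sequence read; stop proxying")
            return
        }
        if err != nil {
            return
        }
        _ = buf[:n] // forward these bytes to the attached container here
    }
}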
diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go
new file mode 100644
index 000000000..6d2dfd3a8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc.go
@@ -0,0 +1,21 @@
+// +build !windows
+// +build !solaris !cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+ return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
new file mode 100644
index 000000000..50234affc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
@@ -0,0 +1,65 @@
+// +build solaris,cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a passthrough for unix.Termios, making it portable to
+// other platforms where it is not available or is handled differently.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+
+ newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY)
+ newState.Oflag &^= unix.OPOST
+ newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+ newState.Cflag |= unix.CS8
+
+ /*
+ VMIN is the minimum number of characters that need to be read in non-canonical mode for a read to return.
+ Since VMIN is overloaded with another element in canonical mode, it defaults to 4 when we switch modes;
+ it needs to be explicitly set to 1.
+ */
+ newState.Cc[C.VMIN] = 1
+ newState.Cc[C.VTIME] = 0
+
+ if err := tcset(fd, &newState); err != 0 {
+ return nil, err
+ }
+ return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
new file mode 100644
index 000000000..4f59d8d93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -0,0 +1,124 @@
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// terminals (state, sizes).
+package term
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrInvalidState is returned if the state of the terminal is invalid.
+ ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+// State represents the state of the terminal.
+type State struct {
+ termios Termios
+}
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+ Height uint16
+ Width uint16
+ x uint16
+ y uint16
+}
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ return os.Stdin, os.Stdout, os.Stderr
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ var inFd uintptr
+ var isTerminalIn bool
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminalIn = IsTerminal(inFd)
+ }
+ return inFd, isTerminalIn
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios Termios
+ return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ if state == nil {
+ return ErrInvalidState
+ }
+ if err := tcset(fd, &state.termios); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// DisableEcho applies the specified state to the terminal connected to the file
+// descriptor, with echo disabled.
+func DisableEcho(fd uintptr, state *State) error {
+ newState := state.termios
+ newState.Lflag &^= unix.ECHO
+
+ if err := tcset(fd, &newState); err != 0 {
+ return err
+ }
+ handleInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ oldState, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+ handleInterrupt(fd, oldState)
+ return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+ go func() {
+ for range sigchan {
+ // quit cleanly so that the next prompt starts on a new line
+ fmt.Println()
+ signal.Stop(sigchan)
+ close(sigchan)
+ RestoreTerminal(fd, state)
+ os.Exit(1)
+ }
+ }()
+}
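A sketch of the typical attach pattern built from these helpers (editorial illustration, assuming the vendored import path):

package main

import (
    "os"

    "github.com/docker/docker/pkg/term"
)

func main() {
    fd, isTerm := term.GetFdInfo(os.Stdin)
    if !isTerm {
        return // stdin is a pipe or a file; nothing to do
    }
    state, err := term.SetRawTerminal(fd)
    if err != nil {
        return
    }
    defer term.RestoreTerminal(fd, state)
    // ... the interactive session runs here with raw input ...
}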
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
new file mode 100644
index 000000000..c0332c3cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -0,0 +1,237 @@
+// +build windows
+
+package term
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
+
+ "github.com/Azure/go-ansiterm/winterm"
+ "github.com/docker/docker/pkg/term/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+ mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+ Height uint16
+ Width uint16
+}
+
+const (
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
+ enableVirtualTerminalInput = 0x0200
+ enableVirtualTerminalProcessing = 0x0004
+ disableNewlineAutoReturn = 0x0008
+)
+
+// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ // Turn on VT handling on all std handles, if possible. This might
+ // fail, in which case we will fall back to terminal emulation.
+ var emulateStdin, emulateStdout, emulateStderr bool
+ fd := os.Stdin.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate that enableVirtualTerminalInput is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+ emulateStdin = true
+ } else {
+ vtInputSupported = true
+ }
+ // Unconditionally set the console mode back even on failure because SetConsoleMode
+ // remembers invalid bits on input handles.
+ winterm.SetConsoleMode(fd, mode)
+ }
+
+ fd = os.Stdout.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStdout = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ fd = os.Stderr.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStderr = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
+ // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well, so only stdin needs emulation.
+ emulateStdin = true
+ emulateStdout = false
+ emulateStderr = false
+ }
+
+ // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
+ // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
+ // go-ansiterm hasn't switched to x/sys/windows.
+ // TODO: switch back to x/sys/windows once go-ansiterm has switched
+ if emulateStdin {
+ stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
+ } else {
+ stdIn = os.Stdin
+ }
+
+ if emulateStdout {
+ stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
+ } else {
+ stdOut = os.Stdout
+ }
+
+ if emulateStderr {
+ stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
+ } else {
+ stdErr = os.Stderr
+ }
+
+ return
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ return windowsconsole.GetHandleInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ winsize := &Winsize{
+ Width: uint16(info.Window.Right - info.Window.Left + 1),
+ Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+ }
+
+ return winsize, nil
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ return windowsconsole.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ mode, e := winterm.GetConsoleMode(fd)
+ if e != nil {
+ return nil, e
+ }
+
+ return &State{mode: mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+ mode := state.mode
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
+ err := winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ state, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return state, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ignore failures, since disableNewlineAutoReturn might not be supported on this
+ // version of Windows.
+ winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+ return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ mode := state.mode
+
+ // See
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+ // Disable these modes
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode &^= winterm.ENABLE_LINE_INPUT
+ mode &^= winterm.ENABLE_MOUSE_INPUT
+ mode &^= winterm.ENABLE_WINDOW_INPUT
+ mode &^= winterm.ENABLE_PROCESSED_INPUT
+
+ // Enable these modes
+ mode |= winterm.ENABLE_EXTENDED_FLAGS
+ mode |= winterm.ENABLE_INSERT_MODE
+ mode |= winterm.ENABLE_QUICK_EDIT_MODE
+ if vtInputSupported {
+ mode |= enableVirtualTerminalInput
+ }
+
+ err = winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
+
+func restoreAtInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+
+ go func() {
+ <-sigchan
+ RestoreTerminal(fd, state)
+ os.Exit(0)
+ }()
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
new file mode 100644
index 000000000..c47341e87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
@@ -0,0 +1,42 @@
+// +build darwin freebsd openbsd
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ getTermios = unix.TIOCGETA
+ setTermios = unix.TIOCSETA
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+ newState.Oflag &^= unix.OPOST
+ newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+ newState.Cflag |= unix.CS8
+ newState.Cc[unix.VMIN] = 1
+ newState.Cc[unix.VTIME] = 0
+
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
new file mode 100644
index 000000000..3e25eb7a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -0,0 +1,37 @@
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ getTermios = unix.TCGETS
+ setTermios = unix.TCSETS
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ termios, err := unix.IoctlGetTermios(int(fd), getTermios)
+ if err != nil {
+ return nil, err
+ }
+
+ var oldState State
+ oldState.termios = Termios(*termios)
+
+ termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ termios.Cflag &^= (unix.CSIZE | unix.PARENB)
+ termios.Cflag |= unix.CS8
+
+ if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
+ return nil, err
+ }
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
new file mode 100644
index 000000000..29d396318
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
@@ -0,0 +1,263 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "unsafe"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+const (
+ escapeSequence = ansiterm.KEY_ESC_CSI
+)
+
+// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
+type ansiReader struct {
+ file *os.File
+ fd uintptr
+ buffer []byte
+ cbBuffer int
+ command []byte
+}
+
+// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
+// Windows console input handle.
+func NewAnsiReader(nFile int) io.ReadCloser {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ return &ansiReader{
+ file: file,
+ fd: fd,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ buffer: make([]byte, 0),
+ }
+}
+
+// Close closes the wrapped file.
+func (ar *ansiReader) Close() (err error) {
+ return ar.file.Close()
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (ar *ansiReader) Fd() uintptr {
+ return ar.fd
+}
+
+// Read reads up to len(p) bytes of translated input events into p.
+func (ar *ansiReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ // Previously read bytes exist, read as much as we can and return
+ if len(ar.buffer) > 0 {
+ logger.Debugf("Reading previously cached bytes")
+
+ originalLength := len(ar.buffer)
+ copiedLength := copy(p, ar.buffer)
+
+ if copiedLength == originalLength {
+ ar.buffer = make([]byte, 0, len(p))
+ } else {
+ ar.buffer = ar.buffer[copiedLength:]
+ }
+
+ logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+ return copiedLength, nil
+ }
+
+ // Read and translate key events
+ events, err := readInputEvents(ar.fd, len(p))
+ if err != nil {
+ return 0, err
+ } else if len(events) == 0 {
+ logger.Debug("No input events detected")
+ return 0, nil
+ }
+
+ keyBytes := translateKeyEvents(events, []byte(escapeSequence))
+
+ // Save excess bytes and right-size keyBytes
+ if len(keyBytes) > len(p) {
+ logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+ ar.buffer = keyBytes[len(p):]
+ keyBytes = keyBytes[:len(p)]
+ } else if len(keyBytes) == 0 {
+ logger.Debug("No key bytes returned from the translator")
+ return 0, nil
+ }
+
+ copiedLength := copy(p, keyBytes)
+ if copiedLength != len(keyBytes) {
+ return 0, errors.New("unexpected copy length encountered")
+ }
+
+ logger.Debugf("Read p[%d]: % x", copiedLength, p)
+ logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+ return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
+ // Determine the maximum number of records to retrieve
+ // -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+ // unsafe.Sizeof requires an expression vs. a type-reference; the casting
+ // tricks the type system into believing it has such an expression.
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+ countRecords := maxBytes / recordSize
+ if countRecords > ansiterm.MAX_INPUT_EVENTS {
+ countRecords = ansiterm.MAX_INPUT_EVENTS
+ } else if countRecords == 0 {
+ countRecords = 1
+ }
+ logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+ // Wait for and read input events
+ events := make([]winterm.INPUT_RECORD, countRecords)
+ nEvents := uint32(0)
+ eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+ if err != nil {
+ return nil, err
+ }
+
+ if eventsExist {
+ err = winterm.ReadConsoleInput(fd, events, &nEvents)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Return a slice restricted to the number of returned records
+ logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+ return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "%s%sA",
+ winterm.VK_DOWN: "%s%sB",
+ winterm.VK_RIGHT: "%s%sC",
+ winterm.VK_LEFT: "%s%sD",
+}
+
+var keyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "\x1B[%sA",
+ winterm.VK_DOWN: "\x1B[%sB",
+ winterm.VK_RIGHT: "\x1B[%sC",
+ winterm.VK_LEFT: "\x1B[%sD",
+ winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
+ winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
+ winterm.VK_INSERT: "\x1B[2%s~",
+ winterm.VK_DELETE: "\x1B[3%s~",
+ winterm.VK_PRIOR: "\x1B[5%s~",
+ winterm.VK_NEXT: "\x1B[6%s~",
+ winterm.VK_F1: "",
+ winterm.VK_F2: "",
+ winterm.VK_F3: "\x1B[13%s~",
+ winterm.VK_F4: "\x1B[14%s~",
+ winterm.VK_F5: "\x1B[15%s~",
+ winterm.VK_F6: "\x1B[17%s~",
+ winterm.VK_F7: "\x1B[18%s~",
+ winterm.VK_F8: "\x1B[19%s~",
+ winterm.VK_F9: "\x1B[20%s~",
+ winterm.VK_F10: "\x1B[21%s~",
+ winterm.VK_F11: "\x1B[23%s~",
+ winterm.VK_F12: "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+ var buffer bytes.Buffer
+ for _, event := range events {
+ if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+ buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+ }
+ }
+
+ return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+ if keyEvent.UnicodeChar == 0 {
+ return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+ }
+
+ _, alt, control := getControlKeys(keyEvent.ControlKeyState)
+ if control {
+ // TODO(azlinux): Implement following control sequences
+ // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
+ // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
+ // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
+ // <Ctrl>-S Suspends printing on the screen (does not stop the program).
+ // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
+ // <Ctrl>-E Quits current command and creates a core
+
+ }
+
+ // <Alt>+Key generates ESC N Key
+ if !control && alt {
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+ }
+
+ return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+ shift, alt, control := getControlKeys(controlState)
+ modifier := getControlKeysModifier(shift, alt, control)
+
+ if format, ok := arrowKeyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, escapeSequence, modifier)
+ }
+
+ if format, ok := keyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, modifier)
+ }
+
+ return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+ shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+ alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+ control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+ return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+ if shift && alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_8
+ }
+ if alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_7
+ }
+ if shift && control {
+ return ansiterm.KEY_CONTROL_PARAM_6
+ }
+ if control {
+ return ansiterm.KEY_CONTROL_PARAM_5
+ }
+ if shift && alt {
+ return ansiterm.KEY_CONTROL_PARAM_4
+ }
+ if alt {
+ return ansiterm.KEY_CONTROL_PARAM_3
+ }
+ if shift {
+ return ansiterm.KEY_CONTROL_PARAM_2
+ }
+ return ""
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 000000000..256577e1f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "io"
+ "os"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
+type ansiWriter struct {
+ file *os.File
+ fd uintptr
+ infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
+ command []byte
+ escapeSequence []byte
+ inAnsiSequence bool
+ parser *ansiterm.AnsiParser
+}
+
+// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
+// Windows console output handle.
+func NewAnsiWriter(nFile int) io.Writer {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
+ logger.Infof("newAnsiWriter: parser %p", parser)
+
+ aw := &ansiWriter{
+ file: file,
+ fd: fd,
+ infoReset: info,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
+ parser: parser,
+ }
+
+ logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
+ logger.Infof("newAnsiWriter: %v", aw)
+ return aw
+}
+
+func (aw *ansiWriter) Fd() uintptr {
+ return aw.fd
+}
+
+// Write writes len(p) bytes from p to the underlying data stream.
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ logger.Infof("Write: % x", p)
+ logger.Infof("Write: %s", string(p))
+ return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
new file mode 100644
index 000000000..4bad32ea7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windowsconsole
+
+import (
+ "os"
+
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+ switch t := in.(type) {
+ case *ansiReader:
+ return t.Fd(), true
+ case *ansiWriter:
+ return t.Fd(), true
+ }
+
+ var inFd uintptr
+ var isTerminal bool
+
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminal = IsConsole(inFd)
+ }
+ return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+ _, e := winterm.GetConsoleMode(fd)
+ return e == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
new file mode 100644
index 000000000..c02a93a03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windowsconsole
+
+import (
+ "io/ioutil"
+ "os"
+ "sync"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+ initOnce.Do(func() {
+ logFile := ioutil.Discard
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ = os.Create("ansiReaderWriter.log")
+ }
+
+ logger = &logrus.Logger{
+ Out: logFile,
+ Formatter: new(logrus.TextFormatter),
+ Level: logrus.DebugLevel,
+ }
+ })
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
new file mode 100644
index 000000000..f58367fe6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -0,0 +1,30 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+ // Skip errno = 0
+ if err == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+ // Skip errno = 0
+ if err == 0 {
+ return nil
+ }
+ return err
+}
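A short sketch querying the current window size (editorial illustration, assuming the vendored import path):

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/docker/docker/pkg/term"
)

func main() {
    ws, err := term.GetWinsize(os.Stdout.Fd())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}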
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go
new file mode 100644
index 000000000..39c1d3207
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go
@@ -0,0 +1,42 @@
+// +build solaris,cgo
+
+package term
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+ return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
new file mode 100644
index 000000000..e4dec3a5d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
new file mode 100644
index 000000000..0d5b448fe
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
@@ -0,0 +1,33 @@
+// +build go1.7,!go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
new file mode 100644
index 000000000..74776e65e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
@@ -0,0 +1,139 @@
+// Package truncindex provides a general 'index tree', used by Docker
+// in order to be able to reference containers by only a few unambiguous
+// characters of their id.
+package truncindex
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/tchap/go-patricia/patricia"
+)
+
+var (
+ // ErrEmptyPrefix is an error returned if the prefix was empty.
+ ErrEmptyPrefix = errors.New("Prefix can't be empty")
+
+ // ErrIllegalChar is returned when a space is in the ID
+ ErrIllegalChar = errors.New("illegal character: ' '")
+
+ // ErrNotExist is returned when ID or its prefix not found in index.
+ ErrNotExist = errors.New("ID does not exist")
+)
+
+// ErrAmbiguousPrefix is returned if the prefix was ambiguous
+// (multiple ids for the prefix).
+type ErrAmbiguousPrefix struct {
+ prefix string
+}
+
+func (e ErrAmbiguousPrefix) Error() string {
+ return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ sync.RWMutex
+ trie *patricia.Trie
+ ids map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+ idx = &TruncIndex{
+ ids: make(map[string]struct{}),
+
+ // Change the patricia max prefix per node length,
+ // because our IDs are always 64 characters long.
+ trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+ }
+ for _, id := range ids {
+ idx.addID(id)
+ }
+ return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+ if strings.Contains(id, " ") {
+ return ErrIllegalChar
+ }
+ if id == "" {
+ return ErrEmptyPrefix
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("id already exists: '%s'", id)
+ }
+ idx.ids[id] = struct{}{}
+ if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+ return fmt.Errorf("failed to insert id: %s", id)
+ }
+ return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is thrown.
+func (idx *TruncIndex) Delete(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if _, exists := idx.ids[id]; !exists || id == "" {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ delete(idx.ids, id)
+ if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is thrown.
+func (idx *TruncIndex) Get(s string) (string, error) {
+ if s == "" {
+ return "", ErrEmptyPrefix
+ }
+ var (
+ id string
+ )
+ subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+ if id != "" {
+ // the prefix matches two or more IDs, so it is ambiguous
+ id = ""
+ return ErrAmbiguousPrefix{prefix: string(prefix)}
+ }
+ id = string(prefix)
+ return nil
+ }
+
+ idx.RLock()
+ defer idx.RUnlock()
+ if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+ return "", err
+ }
+ if id != "" {
+ return id, nil
+ }
+ return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+ idx.Lock()
+ defer idx.Unlock()
+ idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+ handler(string(prefix))
+ return nil
+ })
+}
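A short sketch of the intended use, with made-up IDs (real Docker IDs are 64 hex characters):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"aabbccdd11223344", // illustrative short IDs
		"aabbee5566778899",
	})

	// "aabbc" matches exactly one ID, so Get resolves it.
	id, err := idx.Get("aabbc")
	fmt.Println(id, err) // aabbccdd11223344 <nil>

	// "aabb" matches both IDs and is rejected as ambiguous.
	if _, err := idx.Get("aabb"); err != nil {
		fmt.Println(err)
	}
}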
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
new file mode 100644
index 000000000..7608b0e33
--- /dev/null
+++ b/vendor/github.com/docker/docker/vendor.conf
@@ -0,0 +1,147 @@
+# the following lines are in sorted order, FYI
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
+github.com/Microsoft/hcsshim v0.6.2
+github.com/Microsoft/go-winio v0.4.4
+github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
+github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
+github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
+github.com/gorilla/context v1.1
+github.com/gorilla/mux v1.1
+github.com/jhowardmsft/opengcs v0.0.12
+github.com/kr/pty 5cf931ef8f
+github.com/mattn/go-shellwords v1.0.3
+github.com/sirupsen/logrus v1.0.1
+github.com/tchap/go-patricia v2.2.6
+github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
+github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
+github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
+github.com/pmezard/go-difflib v1.0.0
+
+github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
+github.com/imdario/mergo 0.2.1
+golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
+
+# get libnetwork packages
+github.com/docker/libnetwork 248fd5ea6a67f8810da322e6e7441e8de96a9045 https://github.com/dmcgowan/libnetwork.git
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
+github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
+github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
+github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
+github.com/hashicorp/memberlist v0.1.0
+github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
+github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
+github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
+github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
+github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
+github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
+github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
+github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
+github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
+github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
+github.com/hashicorp/consul v0.5.2
+github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
+github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
+
+# get graph and distribution packages
+github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
+github.com/vbatts/tar-split v0.10.1
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+
+# get go-zfs packages
+github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
+github.com/pborman/uuid v1.0
+
+google.golang.org/grpc v1.3.0
+
+# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
+github.com/opencontainers/runc e9325d442f5979c4f79bfa9e09bdf7abb74ba03b https://github.com/dmcgowan/runc.git
+github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs
+
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+
+# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
+github.com/coreos/go-systemd v4
+github.com/godbus/dbus v4.0.0
+github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
+github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
+
+# gelf logging driver deps
+github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea
+
+github.com/fluent/fluent-logger-golang v1.2.1
+# fluent-logger-golang deps
+github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
+github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+
+# fsnotify
+github.com/fsnotify/fsnotify v1.4.2
+
+# awslogs deps
+github.com/aws/aws-sdk-go v1.4.22
+github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0
+github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
+
+# logentries
+github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf
+
+# gcplogs deps
+golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0
+google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823
+cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525
+github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+
+# containerd
+github.com/containerd/containerd fc10004571bb9b26695ccbf2dd4a83213f60b93e https://github.com/dmcgowan/containerd.git
+github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
+github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
+github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
+
+# cluster
+github.com/docker/swarmkit 8bdecc57887ffc598b63d6433f58e0d2852112c3 https://github.com/dmcgowan/swarmkit.git
+github.com/gogo/protobuf v0.4
+github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
+github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
+golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
+github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+
+# cli
+github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
+github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
+
+# metrics
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
+
+github.com/opencontainers/selinux v1.0.0-rc1
+
+# archive/tar
+# mkdir -p ./vendor/archive
+# git clone git://github.com/tonistiigi/go-1.git ./go
+# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
+# cp -a go/src/archive/tar ./vendor/archive/tar
+# rm -rf ./go
+# vndr
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md
new file mode 100644
index 000000000..d257e44fd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/README.md
@@ -0,0 +1,13 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections)
+
+# Introduction
+
+go-connections provides a common package to work with network connections.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation.
+
+## License
+
+go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
new file mode 100644
index 000000000..4d5f5ae63
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -0,0 +1,242 @@
+// Package nat is a convenience package for manipulation of strings describing network ports.
+package nat
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+const (
+ // portSpecTemplate is the expected format for port specifications
+ portSpecTemplate = "ip:hostPort:containerPort"
+)
+
+// PortBinding represents a binding between a Host IP address and a Host Port
+type PortBinding struct {
+ // HostIP is the host IP Address
+ HostIP string `json:"HostIp"`
+ // HostPort is the host port number
+ HostPort string
+}
+
+// PortMap is a collection of PortBinding indexed by Port
+type PortMap map[Port][]PortBinding
+
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// NewPort creates a new instance of a Port given a protocol and port number or port range
+func NewPort(proto, port string) (Port, error) {
+ // Check for parsing issues on "port" now so we can avoid having
+ // to check it later on.
+
+ portStartInt, portEndInt, err := ParsePortRangeToInt(port)
+ if err != nil {
+ return "", err
+ }
+
+ if portStartInt == portEndInt {
+ return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
+ }
+ return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
+}
+
+// ParsePort parses the port number string and returns an int
+func ParsePort(rawPort string) (int, error) {
+ if len(rawPort) == 0 {
+ return 0, nil
+ }
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// ParsePortRangeToInt parses the port range string and returns start/end ints
+func ParsePortRangeToInt(rawPort string) (int, int, error) {
+ if len(rawPort) == 0 {
+ return 0, 0, nil
+ }
+ start, end, err := ParsePortRange(rawPort)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(start), int(end), nil
+}
+
+// Proto returns the protocol of a Port
+func (p Port) Proto() string {
+ proto, _ := SplitProtoPort(string(p))
+ return proto
+}
+
+// Port returns the port number of a Port
+func (p Port) Port() string {
+ _, port := SplitProtoPort(string(p))
+ return port
+}
+
+// Int returns the port number of a Port as an int
+func (p Port) Int() int {
+ portStr := p.Port()
+ // We don't need to check for an error because we're going to
+ // assume that any error would have been found, and reported, in NewPort()
+ port, _ := ParsePort(portStr)
+ return port
+}
+
+// Range returns the start/end port numbers of a Port range as ints
+func (p Port) Range() (int, int, error) {
+ return ParsePortRangeToInt(p.Port())
+}
+
+// SplitProtoPort splits a port in the format of proto/port
+func SplitProtoPort(rawPort string) (string, string) {
+ parts := strings.Split(rawPort, "/")
+ l := len(parts)
+ if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
+ return "", ""
+ }
+ if l == 1 {
+ return "tcp", rawPort
+ }
+ if len(parts[1]) == 0 {
+ return "tcp", parts[0]
+ }
+ return parts[1], parts[0]
+}
+
+func validateProto(proto string) bool {
+ for _, availableProto := range []string{"tcp", "udp"} {
+ if availableProto == proto {
+ return true
+ }
+ }
+ return false
+}
+
+// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
+// these into the internal types
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+ var (
+ exposedPorts = make(map[Port]struct{}, len(ports))
+ bindings = make(map[Port][]PortBinding)
+ )
+ for _, rawPort := range ports {
+ portMappings, err := ParsePortSpec(rawPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
+ }
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+func splitParts(rawport string) (string, string, string) {
+ parts := strings.Split(rawport, ":")
+ n := len(parts)
+ containerport := parts[n-1]
+
+ switch n {
+ case 1:
+ return "", "", containerport
+ case 2:
+ return "", parts[0], containerport
+ case 3:
+ return parts[0], parts[1], containerport
+ default:
+ return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
+ }
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ var proto string
+ rawIP, hostPort, containerPort := splitParts(rawPort)
+ proto, containerPort = SplitProtoPort(containerPort)
+
+ // Strip [] from IPV6 addresses
+ ip, _, err := net.SplitHostPort(rawIP + ":")
+ if err != nil {
+ return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
+ }
+ if ip != "" && net.ParseIP(ip) == nil {
+ return nil, fmt.Errorf("Invalid ip address: %s", ip)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+ }
+ }
+
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
+ }
+ }
+
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("Invalid proto: %s", proto)
+ }
+
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
+ }
+
+ binding := PortBinding{
+ HostIP: ip,
+ HostPort: hostPort,
+ }
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
+ }
+ return ports, nil
+}
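A brief sketch of ParsePortSpec on a full ip:hostPort:containerPort/proto spec (values illustrative):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Everything except the container port is optional in the spec.
	mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		// Prints: 80/tcp -> 127.0.0.1:8080
		fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
	}
}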
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
new file mode 100644
index 000000000..892adf8c6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/parse.go
@@ -0,0 +1,57 @@
+package nat
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
+// DEPRECATED: do not use, this function may be removed in a future version
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("Empty string specified for ports.")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+ }
+ return start, end, nil
+}
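For example, ParsePortRange accepts both a single port and a dashed range:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	start, end, _ := nat.ParsePortRange("8000-9000")
	fmt.Println(start, end) // 8000 9000

	// A single port parses as a degenerate range.
	start, end, _ = nat.ParsePortRange("8080")
	fmt.Println(start, end) // 8080 8080
}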
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 000000000..ce950171e
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+ "sort"
+ "strings"
+)
+
+type portSorter struct {
+ ports []Port
+ by func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+ return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+ s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+ ip := s.ports[i]
+ jp := s.ports[j]
+
+ return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+ s := &portSorter{ports, predicate}
+ sort.Sort(s)
+}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the ports so that the order is:
+// 1. port with a larger specified host-port binding
+// 2. larger port number
+// 3. port with the tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort will be placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ bindings[p] = []PortBinding{}
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ if _, ok := bindings[entry.port]; ok {
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+ }
+}
+
+func toInt(s string) uint64 {
+ i, _, err := ParsePortRange(s)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
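A small sketch of SortPortMap: the port with an explicit HostPort is moved to the front (values illustrative):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"22/tcp", "80/tcp"}
	bindings := nat.PortMap{
		"80/tcp": []nat.PortBinding{{HostPort: "8080"}},
	}

	// 80/tcp has an explicit HostPort, so it sorts before 22/tcp.
	nat.SortPortMap(ports, bindings)
	fmt.Println(ports) // [80/tcp 22/tcp]
}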
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/README.md
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
new file mode 100644
index 000000000..99846ffdd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -0,0 +1,81 @@
+package sockets
+
+import (
+ "errors"
+ "net"
+ "sync"
+)
+
+var errClosed = errors.New("use of closed network connection")
+
+// InmemSocket implements net.Listener using in-memory only connections.
+type InmemSocket struct {
+ chConn chan net.Conn
+ chClose chan struct{}
+ addr string
+ mu sync.Mutex
+}
+
+// dummyAddr is used to satisfy net.Addr for the in-mem socket
+// it is just stored as a string and returns the string for all calls
+type dummyAddr string
+
+// NewInmemSocket creates an in-memory only net.Listener
+// The addr argument can be any string, but is used to satisfy the `Addr()` part
+// of the net.Listener interface
+func NewInmemSocket(addr string, bufSize int) *InmemSocket {
+ return &InmemSocket{
+ chConn: make(chan net.Conn, bufSize),
+ chClose: make(chan struct{}),
+ addr: addr,
+ }
+}
+
+// Addr returns the socket's addr string to satisfy net.Listener
+func (s *InmemSocket) Addr() net.Addr {
+ return dummyAddr(s.addr)
+}
+
+// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
+func (s *InmemSocket) Accept() (net.Conn, error) {
+ select {
+ case conn := <-s.chConn:
+ return conn, nil
+ case <-s.chClose:
+ return nil, errClosed
+ }
+}
+
+// Close closes the listener. It will be unavailable for use once closed.
+func (s *InmemSocket) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ select {
+ case <-s.chClose:
+ default:
+ close(s.chClose)
+ }
+ return nil
+}
+
+// Dial is used to establish a connection with the in-mem server
+func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
+ srvConn, clientConn := net.Pipe()
+ select {
+ case s.chConn <- srvConn:
+ case <-s.chClose:
+ return nil, errClosed
+ }
+
+ return clientConn, nil
+}
+
+// Network returns the addr string, satisfies net.Addr
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+// String returns the string form
+func (a dummyAddr) String() string {
+ return string(a)
+}
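A minimal round trip through the in-memory listener (sketch):

package main

import (
	"fmt"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("test-addr", 1)
	defer l.Close()

	go func() {
		// Dial hands the server side of a net.Pipe to Accept.
		conn, _ := l.Dial("inmem", "test-addr")
		conn.Write([]byte("ping"))
		conn.Close()
	}()

	conn, _ := l.Accept()
	buf := make([]byte, 4)
	n, _ := conn.Read(buf)
	fmt.Println(string(buf[:n])) // ping
}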
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
new file mode 100644
index 000000000..98e9a1dc6
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/proxy.go
@@ -0,0 +1,51 @@
+package sockets
+
+import (
+ "net"
+ "net/url"
+ "os"
+ "strings"
+
+ "golang.org/x/net/proxy"
+)
+
+// GetProxyEnv allows access to the uppercase and the lowercase forms of
+// proxy-related variables. See the Go net/http documentation for details
+// on these variables: https://golang.org/pkg/net/http/
+func GetProxyEnv(key string) string {
+ proxyValue := os.Getenv(strings.ToUpper(key))
+ if proxyValue == "" {
+ return os.Getenv(strings.ToLower(key))
+ }
+ return proxyValue
+}
+
+// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
+// proxy.Dialer which will route the connections through the proxy using the
+// given dialer.
+func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
+ allProxy := GetProxyEnv("all_proxy")
+ if len(allProxy) == 0 {
+ return direct, nil
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return direct, err
+ }
+
+ proxyFromURL, err := proxy.FromURL(proxyURL, direct)
+ if err != nil {
+ return direct, err
+ }
+
+ noProxy := GetProxyEnv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxyFromURL, nil
+ }
+
+ perHost := proxy.NewPerHost(proxyFromURL, direct)
+ perHost.AddFromString(noProxy)
+
+ return perHost, nil
+}
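Sketch: with ALL_PROXY set, the returned dialer routes connections through the proxy, except for hosts listed in NO_PROXY (the addresses below are illustrative, not a real proxy):

package main

import (
	"fmt"
	"net"
	"os"
	"time"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Illustrative values only; normally these come from the environment.
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080")
	os.Setenv("NO_PROXY", "localhost,127.0.0.1")

	direct := &net.Dialer{Timeout: 10 * time.Second}
	dialer, err := sockets.DialerFromEnvironment(direct)
	if err != nil {
		panic(err)
	}
	// example.com is not in NO_PROXY, so this would go via the proxy.
	if _, err := dialer.Dial("tcp", "example.com:80"); err != nil {
		fmt.Println("dial failed (no proxy running):", err)
	}
}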
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
new file mode 100644
index 000000000..a1d7beb4d
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -0,0 +1,38 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "time"
+)
+
+// Why 32? See https://github.com/docker/docker/pull/8035.
+const defaultTimeout = 32 * time.Second
+
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
+// ConfigureTransport configures the specified Transport according to the
+// specified proto and addr.
+// If the proto is unix (using a unix socket to communicate) or npipe,
+// compression is disabled.
+func ConfigureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ dialer, err := DialerFromEnvironment(&net.Dialer{
+ Timeout: defaultTimeout,
+ })
+ if err != nil {
+ return err
+ }
+ tr.Dial = dialer.Dial
+ }
+ return nil
+}
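A sketch of wiring ConfigureTransport into an http.Client (the daemon socket path is illustrative):

package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix", addr is the socket path; compression is disabled and
	// all dials go to the socket regardless of the request URL host.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // e.g. client.Get(...) against the socket
}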
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 000000000..386cf0dbb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package sockets
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("Unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, defaultTimeout)
+ }
+ return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 000000000..5c21644e1
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,27 @@
+package sockets
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return DialPipe(addr, defaultTimeout)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 000000000..53cbb6c79
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified tls configuration. If tlsConfig is set, it will wrap the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil {
+ tlsConfig.NextProtos = []string{"http/1.1"}
+ l = tls.NewListener(l, tlsConfig)
+ }
+ return l, nil
+}
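Sketch: a plain TCP listener; passing a *tls.Config (for example one built by the tlsconfig package further below) yields a TLS-wrapped listener instead (address illustrative):

package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// nil tlsConfig means plain TCP; pass a *tls.Config to enable TLS.
	l, err := sockets.NewTCPSocket("127.0.0.1:2376", nil)
	if err != nil {
		panic(err)
	}
	http.Serve(l, http.NotFoundHandler())
}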
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 000000000..a8b5dbb6f
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package sockets
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+ if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ mask := syscall.Umask(0777)
+ defer syscall.Umask(mask)
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Chown(path, 0, gid); err != nil {
+ l.Close()
+ return nil, err
+ }
+ if err := os.Chmod(path, 0660); err != nil {
+ l.Close()
+ return nil, err
+ }
+ return l, nil
+}
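Sketch: creating a group-owned unix socket and serving HTTP on it (path and gid illustrative; chown to an arbitrary group generally requires running as root):

package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// gid 0 keeps root group ownership; a daemon would typically look
	// up the gid of a dedicated group such as "docker" instead.
	l, err := sockets.NewUnixSocket("/tmp/example.sock", 0)
	if err != nil {
		panic(err)
	}
	http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over the socket\n"))
	}))
}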
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 000000000..1ca0965e0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+ "runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. On Windows, if
+// loading the system pool fails, it returns a new empty pool instead of an error.
+func SystemCertPool() (*x509.CertPool, error) {
+ certpool, err := x509.SystemCertPool()
+ if err != nil && runtime.GOOS == "windows" {
+ return x509.NewCertPool(), nil
+ }
+ return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 000000000..9ca974539
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool requires go 1.7 or later.
+func SystemCertPool() (*x509.CertPool, error) {
+ return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 000000000..1b31bbb8b
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,244 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+ CAFile string
+
+ // If either CertFile or KeyFile is empty, Client() will not load them
+ // preventing the client from authenticating to the server.
+ // However, Server() requires them and will error out if they are empty.
+ CertFile string
+ KeyFile string
+
+ // client-only option
+ InsecureSkipVerify bool
+ // server-only option
+ ClientAuth tls.ClientAuthType
+ // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+ // creds will include exclusively the roots in that CA file. If no CA file is provided,
+ // the system pool will be used.
+ ExclusiveRootPools bool
+ MinVersion uint16
+ // If Passphrase is set, it will be used to decrypt a TLS private key
+ // if the key is encrypted
+ Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+ tls.VersionSSL30: {},
+ tls.VersionTLS10: {},
+ tls.VersionTLS11: {},
+ tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
+func ServerDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+ }
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
+func ClientDefault() *tls.Config {
+ return &tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+ }
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ var (
+ certPool *x509.CertPool
+ err error
+ )
+ if exclusivePool {
+ certPool = x509.NewCertPool()
+ } else {
+ certPool, err = SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read system certificates: %v", err)
+ }
+ }
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+ _, ok := allTLSVersions[version]
+ return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+ if options.MinVersion > 0 {
+ if !isValidMinVersion(options.MinVersion) {
+ return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+ }
+ if options.MinVersion < config.MinVersion {
+ return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+ }
+ config.MinVersion = options.MinVersion
+ }
+
+ return nil
+}
+
+// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
+// password when trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+ return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+ // this section makes some small changes to code from notary/tuf/utils/x509.go
+ pemBlock, _ := pem.Decode(keyBytes)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("no valid private key found")
+ }
+
+ var err error
+ if x509.IsEncryptedPEMBlock(pemBlock) {
+ keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+ }
+ keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+ }
+
+ return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options',
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+ if options.CertFile == "" && options.KeyFile == "" {
+ return nil, nil
+ }
+
+ errMessage := "Could not load X509 key pair"
+
+ cert, err := ioutil.ReadFile(options.CertFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault()
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ tlsCerts, err := getCert(options)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = tlsCerts
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault()
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
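A sketch of building mutually authenticated client and server configs from one Options value (the file paths are illustrative):

package main

import (
	"crypto/tls"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	opts := tlsconfig.Options{
		CAFile:   "/etc/docker/ca.pem",
		CertFile: "/etc/docker/cert.pem",
		KeyFile:  "/etc/docker/key.pem",
	}

	// Client side: verify the server against CAFile and present the
	// cert/key pair for mutual TLS.
	clientCfg, err := tlsconfig.Client(opts)
	if err != nil {
		panic(err)
	}

	// Server side: require and verify client certificates signed by CAFile.
	opts.ClientAuth = tls.RequireAndVerifyClientCert
	serverCfg, err := tlsconfig.Server(opts)
	if err != nil {
		panic(err)
	}
	_, _ = clientCfg, serverCfg
}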
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
new file mode 100644
index 000000000..6b4c6a7c0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
@@ -0,0 +1,17 @@
+// +build go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
new file mode 100644
index 000000000..ee22df47c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
@@ -0,0 +1,15 @@
+// +build !go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 000000000..4f70a4e13
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,16 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library for transforming human-friendly measurements into machine-friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
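+
+A minimal sketch of typical use, exercising only the parsing and formatting
+helpers documented in godoc:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ units "github.com/docker/go-units"
+)
+
+func main() {
+ // Parse a human-friendly size into a byte count (binary multiples).
+ n, err := units.RAMInBytes("32GB") // 32 * 1024^3
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(n) // 34359738368
+
+ // Format a byte count for display, using decimal (SI) multiples.
+ fmt.Println(units.HumanSize(2746000)) // "2.746 MB"
+}
+```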
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc.
+
+go-units is licensed under the Apache License, Version 2.0.
+See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 000000000..c219a8a96
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper functions to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
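+
+// A sketch of the boundaries implemented above:
+//
+// HumanDuration(47 * time.Second)    // "47 seconds"
+// HumanDuration(3 * time.Hour)       // "3 hours"
+// HumanDuration(35 * 24 * time.Hour) // "5 weeks"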
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 000000000..f5b82ea24
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,96 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) // optional 'i' so binary suffixes such as "KiB" parse, as documented below
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ unitsLimit := len(_map) - 1
+ for size >= base && i < unitsLimit {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 significant digits (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
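+
+// A sketch of the two parsing entry points (decimal vs. binary multipliers):
+//
+// FromHumanSize("17MB") // 17000000 (decimal, SI)
+// RAMInBytes("17MB")    // 17825792 (binary, 17 * 1024 * 1024)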
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 4 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[3])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= float64(mul)
+ }
+
+ return int64(size), nil
+}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 000000000..5ac7fd825
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human-friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // Magic numbers for making the syscall.
+ // Some of these are defined in the syscall package, but not all,
+ // and the Windows client doesn't get access to the syscall package
+ // at all, so they are defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
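+
+// A sketch of the accepted "soft[:hard]" forms:
+//
+// ParseUlimit("nofile=1024")      // {Name: "nofile", Soft: 1024, Hard: 1024}
+// ParseUlimit("nofile=1024:2048") // {Name: "nofile", Soft: 1024, Hard: 2048}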
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE
new file mode 100644
index 000000000..27448585a
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 000000000..dcffb31ae
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> into the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control are managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
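+
+A minimal sketch of getting started, using the key generation and certificate
+helpers this package exports:
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/docker/libtrust"
+)
+
+func main() {
+ // Generate a new EC P-256 key to serve as an identity.
+ key, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println("key ID:", key.KeyID())
+
+ // Issue a self-signed certificate usable by a TLS server on localhost.
+ cert, err := libtrust.GenerateSelfSignedServerCert(key, []string{"localhost"}, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println("certificate CN:", cert.Subject.CommonName)
+}
+```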
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 000000000..3dcca33cb
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "time"
+)
+
+type certTemplateInfo struct {
+ commonName string
+ domains []string
+ ipAddresses []net.IP
+ isCA bool
+ clientAuth bool
+ serverAuth bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+ // Generate a certificate template which is valid from the past week to
+ // 10 years from now. The usage of the certificate depends on the
+ // specified fields in the given certTemplateInfo object.
+ var (
+ keyUsage x509.KeyUsage
+ extKeyUsage []x509.ExtKeyUsage
+ )
+
+ if info.isCA {
+ keyUsage = x509.KeyUsageCertSign
+ }
+
+ if info.clientAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+ }
+
+ if info.serverAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+ }
+
+ return &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: info.commonName,
+ },
+ NotBefore: time.Now().Add(-time.Hour * 24 * 7),
+ NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
+ DNSNames: info.domains,
+ IPAddresses: info.ipAddresses,
+ IsCA: info.isCA,
+ KeyUsage: keyUsage,
+ ExtKeyUsage: extKeyUsage,
+ BasicConstraintsValid: info.isCA,
+ }
+}
+
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+ pubCertTemplate := generateCertTemplate(subInfo)
+ privCertTemplate := generateCertTemplate(issInfo)
+
+ certDER, err := x509.CreateCertificate(
+ rand.Reader, pubCertTemplate, privCertTemplate,
+ pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create certificate: %s", err)
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %s", err)
+ }
+
+ return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ domains: domains,
+ ipAddresses: ipAddresses,
+ serverAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ clientAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+ subjectInfo := &certTemplateInfo{
+ commonName: trustedKey.KeyID(),
+ isCA: true,
+ }
+ issuerInfo := &certTemplateInfo{
+ commonName: signer.KeyID(),
+ }
+
+ return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration. Any self-signed certificates issued by the specified
+// trusted keys will be verified during a TLS handshake.
+func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+
+ for _, trustedKey := range trustedKeys {
+ cert, err := GenerateCACert(signer, trustedKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
+ }
+
+ certPool.AddCert(cert)
+ }
+
+ return certPool, nil
+}
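+
+// A sketch of wiring the pool into a TLS configuration (signer and
+// trustedKey are placeholder values created elsewhere):
+//
+// pool, err := GenerateCACertPool(signer, []PublicKey{trustedKey})
+// tlsConfig := &tls.Config{ClientCAs: pool, ClientAuth: tls.RequireAndVerifyClientCert}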
+
+// LoadCertificateBundle loads certificates from the given file. The file should be
+// PEM encoded and contain one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ certificates := []*x509.Certificate{}
+ var block *pem.Block
+ block, b = pem.Decode(b)
+ for ; block != nil; block, b = pem.Decode(b) {
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ } else {
+ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
+ }
+ }
+
+ return certificates, nil
+}
+
+// LoadCertificatePool loads a CA pool from the given file. The file should be
+// PEM encoded and contain one or more certificates. The expected PEM type is "CERTIFICATE".
+func LoadCertificatePool(filename string) (*x509.CertPool, error) {
+ certs, err := LoadCertificateBundle(filename)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go
new file mode 100644
index 000000000..ec5d2159c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/doc.go
@@ -0,0 +1,9 @@
+/*
+Package libtrust provides an interface for managing authentication and
+authorization using public key cryptography. Authentication is handled
+using the identity attached to the public key and verified through TLS
+x509 certificates, a key challenge, or signature. Authorization and
+access control are managed through a trust graph distributed between
+both remote trust servers and locally cached and managed data.
+*/
+package libtrust
diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go
new file mode 100644
index 000000000..00bbe4b3c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/ec_key.go
@@ -0,0 +1,428 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["crv"] = k.CurveName()
+
+ xBytes := k.X.Bytes()
+ yBytes := k.Y.Bytes()
+ octetLength := (k.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output so that x, y are each
+ // *octetLength* bytes long.
+ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+ xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...)
+
+ jwk["x"] = joseBase64UrlEncode(xBuf)
+ jwk["y"] = joseBase64UrlEncode(yBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
+ // consistency.
+
+ // Get the curve identifier value.
+ crv, err := stringFromMap(jwk, "crv")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+ }
+
+ var (
+ curve elliptic.Curve
+ sigAlg *signatureAlgorithm
+ )
+
+ switch {
+ case crv == "P-256":
+ curve = elliptic.P256()
+ sigAlg = es256
+ case crv == "P-384":
+ curve = elliptic.P384()
+ sigAlg = es384
+ case crv == "P-521":
+ curve = elliptic.P521()
+ sigAlg = es512
+ default:
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
+ }
+
+ // Get the X and Y coordinates for the public key point.
+ xB64Url, err := stringFromMap(jwk, "x")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+ x, err := parseECCoordinate(xB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+
+ yB64Url, err := stringFromMap(jwk, "y")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+ y, err := parseECCoordinate(yB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+
+ key := &ecPublicKey{
+ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+ curveName: crv, signatureAlgorithm: sigAlg,
+ }
+
+ // Key ID is optional too, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashID is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
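+
+// A sketch of a sign/verify round trip with a freshly generated key
+// (strings.NewReader stands in for any io.Reader payload):
+//
+// key, _ := GenerateECP256PrivateKey()
+// sig, alg, _ := key.Sign(strings.NewReader("payload"), crypto.SHA256)
+// err := key.PublicKey().Verify(strings.NewReader("payload"), alg, sig)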
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 000000000..5b2b4fca6
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+ "path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ // Stop at the first matching pattern so a key is only added once.
+ break
+ }
+ }
+ }
+
+ return filtered, nil
+}
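+
+// A sketch of filtering a key set against a registry host, where keys is a
+// []PublicKey loaded elsewhere:
+//
+// trusted, err := FilterByHosts(keys, "registry.example.com:5000", false)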
diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+ _ "crypto/sha256" // Registrer SHA224 and SHA256
+ _ "crypto/sha512" // Registrer SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+ JWK PublicKey `json:"jwk,omitempty"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+ Header jsHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected,omitempty"`
+}
+
+type jsSignaturesSorted []jsSignature
+
+func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
+func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
+
+func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
+ ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
+ si, sj := jsbkid[i].Signature, jsbkid[j].Signature
+
+ if ki == kj {
+ return si < sj
+ }
+
+ return ki < kj
+}
+
+type signKey struct {
+ PrivateKey
+ Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+ payload string
+ signatures []jsSignature
+ indent string
+ formatLength int
+ formatTail []byte
+}
+
+func newJSONSignature() *JSONSignature {
+ return &JSONSignature{
+ signatures: make([]jsSignature, 0, 1),
+ }
+}
+
+// Payload returns the encoded payload of the signature. This
+// payload should not be signed directly.
+func (js *JSONSignature) Payload() ([]byte, error) {
+ return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+ protected := map[string]interface{}{
+ "formatLength": js.formatLength,
+ "formatTail": joseBase64UrlEncode(js.formatTail),
+ "time": time.Now().UTC().Format(time.RFC3339),
+ }
+ protectedBytes, err := json.Marshal(protected)
+ if err != nil {
+ return "", err
+ }
+
+ return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+ buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+ copy(buf, protectedHeader)
+ buf[len(protectedHeader)] = '.'
+ copy(buf[len(protectedHeader)+1:], js.payload)
+ return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: jsHeader{
+ JWK: key.PublicKey(),
+ Algorithm: algorithm,
+ },
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
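+
+// A sketch of producing a JWS from a JSON payload, assuming a PrivateKey
+// named key (NewJSONSignature is defined elsewhere in this package):
+//
+// js, _ := NewJSONSignature([]byte(`{"name": "example"}`))
+// _ = js.Sign(key)
+// jws, _ := js.JWS()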
+
+// SignWithChain adds a signature using the given private key
+// and setting the x509 chain. The public key of the first element
+// in the chain must be the public key corresponding with the sign key.
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+ // Ensure key.Chain[0] is public key for key
+ //key.Chain.PublicKey
+ //key.PublicKey().CryptoPublicKey()
+
+ // Verify chain
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ header := jsHeader{
+ Chain: make([]string, len(chain)),
+ Algorithm: algorithm,
+ }
+
+ for i, cert := range chain {
+ header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: header,
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) {
+ keys := make([]PublicKey, len(js.signatures))
+ for i, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ } else if signature.Header.JWK != nil {
+ publicKey = signature.Header.JWK
+ } else {
+ return nil, errors.New("missing public key")
+ }
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keys[i] = publicKey
+ }
+ return keys, nil
+}
+
+// VerifyChains verifies all the signatures and the chains associated
+// with each signature and returns the list of verified chains.
+// Signatures without an x509 chain are not checked.
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
+ chains := make([][]*x509.Certificate, 0, len(js.signatures))
+ for _, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ intermediates := x509.NewCertPool()
+ if len(signature.Header.Chain) > 1 {
+ intermediateChain := signature.Header.Chain[1:]
+ for i := range intermediateChain {
+ certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
+ if err != nil {
+ return nil, err
+ }
+ intermediate, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ intermediates.AddCert(intermediate)
+ }
+ }
+
+ verifyOptions := x509.VerifyOptions{
+ Intermediates: intermediates,
+ Roots: ca,
+ }
+
+ verifiedChains, err := cert.Verify(verifyOptions)
+ if err != nil {
+ return nil, err
+ }
+ chains = append(chains, verifiedChains...)
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ }
+ return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("missing signature")
+ }
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ jsonMap := map[string]interface{}{
+ "payload": js.payload,
+ "signatures": js.signatures,
+ }
+
+ return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+ if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+ quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+ if quoteIndex > 0 {
+ indent = string(jsonContent[2 : quoteIndex+1])
+ }
+ }
+ return
+}
+
+type jsParsedHeader struct {
+ JWK json.RawMessage `json:"jwk"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+ Header jsParsedHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+ type jsParsed struct {
+ Payload string `json:"payload"`
+ Signatures []jsParsedSignature `json:"signatures"`
+ }
+ parsed := &jsParsed{}
+ err := json.Unmarshal(content, parsed)
+ if err != nil {
+ return nil, err
+ }
+ if len(parsed.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+ payload, err := joseBase64UrlDecode(parsed.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ js, err := NewJSONSignature(payload)
+ if err != nil {
+ return nil, err
+ }
+ js.signatures = make([]jsSignature, len(parsed.Signatures))
+ for i, signature := range parsed.Signatures {
+ header := jsHeader{
+ Algorithm: signature.Header.Algorithm,
+ }
+ if signature.Header.Chain != nil {
+ header.Chain = signature.Header.Chain
+ }
+ if signature.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signature.Signature,
+ Protected: signature.Protected,
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a JSON byte slice.
+// The JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+ var dataMap map[string]interface{}
+ err := json.Unmarshal(content, &dataMap)
+ if err != nil {
+ return nil, err
+ }
+
+ js := newJSONSignature()
+ js.indent = detectJSONIndent(content)
+
+ js.payload = joseBase64UrlEncode(content)
+
+ // Find trailing } and whitespace, put in protected header
+ closeIndex := bytes.LastIndexFunc(content, notSpace)
+ if content[closeIndex] != '}' {
+ return nil, ErrInvalidJSONContent
+ }
+ lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+ if content[lastRuneIndex] == ',' {
+ return nil, ErrInvalidJSONContent
+ }
+ js.formatLength = lastRuneIndex + 1
+ js.formatTail = content[js.formatLength:]
+
+ if len(signatures) > 0 {
+ for _, signature := range signatures {
+ var parsedJSig jsParsedSignature
+
+ if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+ return nil, err
+ }
+
+ // TODO(stevvooe): A lot of the code below is repeated in
+ // ParseJWS. It will require more refactoring to fix that.
+ jsig := jsSignature{
+ Header: jsHeader{
+ Algorithm: parsedJSig.Header.Algorithm,
+ },
+ Signature: parsedJSig.Signature,
+ Protected: parsedJSig.Protected,
+ }
+
+ if parsedJSig.Header.Chain != nil {
+ jsig.Header.Chain = parsedJSig.Header.Chain
+ }
+
+ if parsedJSig.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ jsig.Header.JWK = publicKey
+ }
+
+ js.signatures = append(js.signatures, jsig)
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// empty struct. The JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+ switch content.(type) {
+ case map[string]interface{}:
+ case struct{}:
+ // Note: only the empty struct type struct{} matches this case; other
+ // struct types fall through to the error below.
+ default:
+ return nil, errors.New("invalid data type")
+ }
+
+ js := newJSONSignature()
+ js.indent = " "
+
+ payload, err := json.MarshalIndent(content, "", js.indent)
+ if err != nil {
+ return nil, err
+ }
+ js.payload = joseBase64UrlEncode(payload)
+
+ // Remove '\n}' from formatted section, put in protected header
+ js.formatLength = len(payload) - 2
+ js.formatTail = payload[js.formatLength:]
+
+ return js, nil
+}
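+
+// exampleSignMap is a minimal sketch (hypothetical helper, not part of the
+// upstream libtrust API): build a JSONSignature from a map, sign it, and
+// emit the combined document with the signatures under a "signatures" key.
+func exampleSignMap(key PrivateKey) ([]byte, error) {
+ js, err := NewJSONSignatureFromMap(map[string]interface{}{"name": "example"})
+ if err != nil {
+ return nil, err
+ }
+ if err := js.Sign(key); err != nil {
+ return nil, err
+ }
+ return js.PrettySignature("signatures")
+}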
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+ value, ok := m[key]
+ if !ok {
+ return 0, false
+ }
+ switch v := value.(type) {
+ case int:
+ return v, true
+ case float64:
+ return int(v), true
+ default:
+ return 0, false
+ }
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+ value, ok := m[key]
+ if !ok {
+ return "", false
+ }
+ v, ok = value.(string)
+ return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSONSignature. If the signatures are missing the format information,
+// an error is returned. The formatted signature must have been created
+// by the PrettySignature method.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+ var contentMap map[string]json.RawMessage
+ err := json.Unmarshal(content, &contentMap)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling content: %s", err)
+ }
+ sigMessage, ok := contentMap[signatureKey]
+ if !ok {
+ return nil, ErrMissingSignatureKey
+ }
+
+ var signatureBlocks []jsParsedSignature
+ err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+ }
+
+ js := newJSONSignature()
+ js.signatures = make([]jsSignature, len(signatureBlocks))
+
+ for i, signatureBlock := range signatureBlocks {
+ protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error: %s", err)
+ }
+ var protectedHeader map[string]interface{}
+ err = json.Unmarshal(protectedBytes, &protectedHeader)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+ }
+
+ formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted length")
+ }
+ encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted tail")
+ }
+ formatTail, err := joseBase64UrlDecode(encodedTail)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+ }
+ if js.formatLength == 0 {
+ js.formatLength = formatLength
+ } else if js.formatLength != formatLength {
+ return nil, errors.New("conflicting format length")
+ }
+ if len(js.formatTail) == 0 {
+ js.formatTail = formatTail
+ } else if !bytes.Equal(js.formatTail, formatTail) {
+ return nil, errors.New("conflicting format tail")
+ }
+
+ header := jsHeader{
+ Algorithm: signatureBlock.Header.Algorithm,
+ Chain: signatureBlock.Header.Chain,
+ }
+ if signatureBlock.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signatureBlock.Signature,
+ Protected: signatureBlock.Protected,
+ }
+ }
+ if js.formatLength > len(content) {
+ return nil, errors.New("invalid format length")
+ }
+ formatted := make([]byte, js.formatLength+len(js.formatTail))
+ copy(formatted, content[:js.formatLength])
+ copy(formatted[js.formatLength:], js.formatTail)
+ js.indent = detectJSONIndent(formatted)
+ js.payload = joseBase64UrlEncode(formatted)
+
+ return js, nil
+}
+
+// PrettySignature formats a json signature into an easy to read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
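+
+// exampleParsePretty is a minimal sketch (hypothetical helper, not part of
+// the upstream libtrust API): parse a document produced by PrettySignature
+// back into a JSONSignature and verify its signatures.
+func exampleParsePretty(signed []byte) ([]PublicKey, error) {
+ js, err := ParsePrettySignature(signed, "signatures")
+ if err != nil {
+ return nil, err
+ }
+ return js.Verify()
+}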
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var sb [][]byte
+ for _, jsig := range js.signatures {
+ p, err := json.Marshal(jsig)
+ if err != nil {
+ return nil, err
+ }
+
+ sb = append(sb, p)
+ }
+
+ return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
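+
+// exampleDetachedSignatures is a minimal sketch (hypothetical helper, not
+// part of the upstream libtrust API): extract the signatures as opaque,
+// sorted blobs and reassemble them later with the original payload.
+func exampleDetachedSignatures(js *JSONSignature, payload []byte) (*JSONSignature, error) {
+ sigs, err := js.Signatures()
+ if err != nil {
+ return nil, err
+ }
+ return NewJSONSignature(payload, sigs...)
+}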
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 000000000..73642db2a
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+ // Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error
+ // CryptoPublicKey returns the internal object which can be used as a
+ // crypto.PublicKey for use with other standard library operations. The type
+ // is either *rsa.PublicKey or *ecdsa.PublicKey
+ CryptoPublicKey() crypto.PublicKey
+ // These public keys can be serialized to the standard JSON encoding for
+ // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+ // Algorithms.
+ MarshalJSON() ([]byte, error)
+ // These keys can also be serialized to the standard PEM encoding.
+ PEMBlock() (*pem.Block, error)
+ // The string representation of a key is its key type and ID.
+ String() string
+ AddExtendedField(string, interface{})
+ GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+ // A PrivateKey contains all fields and methods of a PublicKey of the
+ // same type. The MarshalJSON method also outputs the private key as a
+ // JSON Web Key, and the PEMBlock method outputs the private key as a
+ // PEM block.
+ PublicKey
+ // PublicKey returns the PublicKey associated with this PrivateKey.
+ PublicKey() PublicKey
+ // Sign signs the data read from the io.Reader using a signature algorithm
+ // supported by the private key. If the specified hashing algorithm is
+ // supported by this key, that hash function is used to generate the
+ // signature; otherwise the default hashing algorithm for this key is
+ // used. Returns the signature and identifier of the algorithm used.
+ Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+ // CryptoPrivateKey returns the internal object which can be used as a
+ // crypto.PrivateKey for use with other standard library operations. The
+ // type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+ CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+ switch cryptoPublicKey := cryptoPublicKey.(type) {
+ case *ecdsa.PublicKey:
+ return fromECPublicKey(cryptoPublicKey)
+ case *rsa.PublicKey:
+ return fromRSAPublicKey(cryptoPublicKey), nil
+ default:
+ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+ }
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return fromECPrivateKey(cryptoPrivateKey)
+ case *rsa.PrivateKey:
+ return fromRSAPrivateKey(cryptoPrivateKey), nil
+ default:
+ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+ }
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+ pubKeys := []PublicKey{}
+
+ for {
+ var pemBlock *pem.Block
+ pemBlock, data = pem.Decode(data)
+ if pemBlock == nil {
+ break
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ pubKey, err := pubKeyFromPEMBlock(pemBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ }
+
+ var key PrivateKey
+
+ switch {
+ case pemBlock.Type == "RSA PRIVATE KEY":
+ rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+ }
+ key = fromRSAPrivateKey(rsaPrivateKey)
+ case pemBlock.Type == "EC PRIVATE KEY":
+ ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+ }
+ key, err = fromECPrivateKey(ecPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+ }
+
+ addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+ return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Public Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Public Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC public key.
+ return ecPublicKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA public key.
+ return rsaPublicKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Public Key type not supported: %q\n", kty,
+ )
+ }
+}
+
+// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
+// and returns a slice of Public Key objects.
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
+ rawKeys, err := loadJSONKeySetRaw(data)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys := make([]PublicKey, 0, len(rawKeys))
+
+ for _, rawKey := range rawKeys {
+ pubKey, err := UnmarshalPublicKeyJWK(rawKey)
+ if err != nil {
+ return nil, err
+ }
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
+// Private Key to be used with libtrust.
+func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Private Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Private Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC private key.
+ return ecPrivateKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA private key.
+ return rsaPrivateKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Private Key type not supported: %q\n", kty,
+ )
+ }
+}
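+
+// exampleJWKRoundTrip is a minimal sketch (hypothetical helper, not part of
+// the upstream libtrust API): serialize a generated public key as a JWK and
+// parse it back through UnmarshalPublicKeyJWK.
+func exampleJWKRoundTrip() (PublicKey, error) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, err
+ }
+ jwkJSON, err := json.Marshal(key.PublicKey())
+ if err != nil {
+ return nil, err
+ }
+ return UnmarshalPublicKeyJWK(jwkJSON)
+}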
diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go
new file mode 100644
index 000000000..c526de545
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
+package libtrust
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+var (
+ // ErrKeyFileDoesNotExist indicates that the private key file does not exist.
+ ErrKeyFileDoesNotExist = errors.New("key file does not exist")
+)
+
+func readKeyFileBytes(filename string) ([]byte, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = ErrKeyFileDoesNotExist
+ } else {
+ err = fmt.Errorf("unable to read key file %s: %s", filename, err)
+ }
+
+ return nil, err
+ }
+
+ return data, nil
+}
+
+/*
+ Loading and Saving of Public and Private Keys in either PEM or JWK format.
+*/
+
+// LoadKeyFile opens the given filename and attempts to read a Private Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadKeyFile(filename string) (PrivateKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PrivateKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPrivateKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPrivateKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadPublicKeyFile(filename string) (PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PublicKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPublicKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPublicKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// SaveKey saves the given key to a file using the provided filename.
+// This process will overwrite any existing file at the provided location.
+func SaveKey(filename string, key PrivateKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
+ if err != nil {
+ return fmt.Errorf("unable to write private key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// SavePublicKey saves the given public key to the file.
+func SavePublicKey(filename string, key PublicKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode public key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode public key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write public key file %s: %s", filename, err)
+ }
+
+ return nil
+}
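+
+// exampleKeyFiles is a minimal sketch (hypothetical helper with a fixed
+// path, not part of the upstream libtrust API): the file extension selects
+// JWK encoding (.json/.jwk) versus PEM for both saving and loading.
+func exampleKeyFiles() (PrivateKey, error) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, err
+ }
+ if err := SaveKey("trust-key.json", key); err != nil {
+ return nil, err
+ }
+ return LoadKeyFile("trust-key.json")
+}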
+
+// Public Key Set files
+
+type jwkSet struct {
+ Keys []json.RawMessage `json:"keys"`
+}
+
+// LoadKeySetFile loads a set of public keys from a file, using JWK format
+// for .json/.jwk extensions and PEM format otherwise.
+func LoadKeySetFile(filename string) ([]PublicKey, error) {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return loadJSONKeySetFile(filename)
+ }
+
+ // Must be a PEM format file
+ return loadPEMKeySetFile(filename)
+}
+
+func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
+ if len(data) == 0 {
+ // This is okay, just return an empty slice.
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a public key to a key set file, using JWK format for
+// .json/.jwk extensions and appending a PEM block otherwise.
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encoded trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 000000000..9a98ae357
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+ key PrivateKey
+ clientFile string
+ clientDir string
+
+ clientLock sync.RWMutex
+ clients []PublicKey
+
+ configLock sync.Mutex
+ configs []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+ m := &ClientKeyManager{
+ key: trustKey,
+ clientFile: clientFile,
+ clientDir: clientDir,
+ }
+ if err := m.loadKeys(); err != nil {
+ return nil, err
+ }
+ // TODO Start watching file and directory
+
+ return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+ // Load authorized keys file
+ var clients []PublicKey
+ if c.clientFile != "" {
+ clients, err = LoadKeySetFile(c.clientFile)
+ if err != nil {
+ return fmt.Errorf("unable to load authorized keys: %s", err)
+ }
+ }
+
+ // Add clients from authorized keys directory
+ files, err := ioutil.ReadDir(c.clientDir)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("unable to open authorized keys directory: %s", err)
+ }
+ for _, f := range files {
+ if !f.IsDir() {
+ publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+ if err != nil {
+ return fmt.Errorf("unable to load authorized key file: %s", err)
+ }
+ clients = append(clients, publicKey)
+ }
+ }
+
+ c.clientLock.Lock()
+ c.clients = clients
+ c.clientLock.Unlock()
+
+ return nil
+}
+
+// RegisterTLSConfig registers a tls configuration with the manager
+// so that any changes to the keys may be reflected in
+// the tls client CA pool
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+ c.clientLock.RLock()
+ certPool, err := GenerateCACertPool(c.key, c.clients)
+ if err != nil {
+ return fmt.Errorf("CA pool generation error: %s", err)
+ }
+ c.clientLock.RUnlock()
+
+ tlsConfig.ClientCAs = certPool
+
+ c.configLock.Lock()
+ c.configs = append(c.configs, tlsConfig)
+ c.configLock.Unlock()
+
+ return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+ return nil, err
+ }
+
+ // Generate cert
+ ips, domains, err := parseAddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ // add domain that it expects clients to use
+ domains = append(domains, domain)
+ x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+
+ // Verify client certificates against a CA?
+ if caPath != "" {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+
+ return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+ return &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ // Avoid fallback on insecure SSL protocols
+ MinVersion: tls.VersionTLS10,
+ }
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ var domains []string
+ var ips []net.IP
+ ip := net.ParseIP(host)
+ if ip != nil {
+ ips = []net.IP{ip}
+ } else {
+ domains = []string{host}
+ }
+ return ips, domains, nil
+}
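+
+// exampleServerTLS is a minimal sketch (hypothetical helper with made-up
+// paths and addresses, not part of the upstream libtrust API) showing how
+// the pieces above combine into a server-side identity-auth TLS config.
+func exampleServerTLS(trustKey PrivateKey) (*tls.Config, error) {
+ clients, err := NewClientKeyManager(trustKey, "authorized-keys.json", "authorized-keys.d")
+ if err != nil {
+ return nil, err
+ }
+ return NewIdentityAuthTLSConfig(trustKey, clients, "0.0.0.0:4243", "example.com")
+}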
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 000000000..dac4cacf2
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * RSA DIGITAL SIGNATURE ALGORITHM PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+ *rsa.PublicKey
+ extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+ return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // Verify the signature of the given data, returning a non-nil error if it is invalid.
+ sigAlg, err := rsaSignatureAlgorithmByName(alg)
+ if err != nil {
+ return fmt.Errorf("unable to verify Signature: %s", err)
+ }
+
+ hasher := sigAlg.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+ if err != nil {
+ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+ jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract 'n', 'e', and 'kid' and check for
+ // consistency.
+
+ // Get the modulus parameter N.
+ nB64Url, err := stringFromMap(jwk, "n")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ n, err := parseRSAModulusParam(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ // Get the public exponent E.
+ eB64Url, err := stringFromMap(jwk, "e")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ e, err := parseRSAPublicExponentParam(eB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ key := &rsaPublicKey{
+ PublicKey: &rsa.PublicKey{N: n, E: e},
+ }
+
+ // Key ID is optional, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+ }
+ }
+
+ if _, ok := jwk["d"]; ok {
+ return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * RSA DIGITAL SIGNATURE ALGORITHM PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+ rsaPublicKey
+ *rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+ return &rsaPrivateKey{
+ *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+ cryptoPrivateKey,
+ }
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+ return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+ return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+ hasher := sigAlg.HashID().New()
+
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ alg = sigAlg.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+ k.Precompute() // Make sure the precomputed values are stored.
+ jwk := k.rsaPublicKey.toMap()
+
+ jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+ jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+ jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+ jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+ jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+ jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+ otherPrimes := k.Primes[2:]
+
+ if len(otherPrimes) > 0 {
+ otherPrimesInfo := make([]interface{}, len(otherPrimes))
+ for i, r := range otherPrimes {
+ otherPrimeInfo := make(map[string]string, 3)
+ otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+ crtVal := k.Precomputed.CRTValues[i]
+ otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+ otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+ otherPrimesInfo[i] = otherPrimeInfo
+ }
+ jwk["oth"] = otherPrimesInfo
+ }
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+ // only the private key exponent 'd' is REQUIRED, the others are just for
+ // signature/decryption optimizations and SHOULD be included when the JWK
+ // is produced. A JWK which only includes 'd' MAY be accepted, but this
+ // implementation requires the extra fields. Only the 'oth' field is
+ // optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+ }
+ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ var oth interface{}
+ if _, ok := jwk["oth"]; ok {
+ oth = jwk["oth"]
+ delete(jwk, "oth")
+ }
+
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract the public key information, then extract the private
+ // key values.
+ publicKey, err := rsaPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: privateExponent,
+ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
+ Precomputed: rsa.PrecomputedValues{
+ Dp: firstFactorCRT,
+ Dq: secondFactorCRT,
+ Qinv: crtCoeff,
+ },
+ }
+
+ if oth != nil {
+ // Should be an array of more JSON objects.
+ otherPrimesInfo, ok := oth.([]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+ }
+ numOtherPrimeFactors := len(otherPrimesInfo)
+ if numOtherPrimeFactors == 0 {
+ return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
+ }
+ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+ for i, val := range otherPrimesInfo {
+ otherPrimeinfo, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+ }
+
+ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ crtValue := &crtValues[i] // take a pointer so the CRT values are stored back into the slice
+ crtValue.Exp = otherFactorCRT
+ crtValue.Coeff = otherCrtCoeff
+ crtValue.R = productOfPrimes
+ otherPrimeFactors[i] = otherPrimeFactor
+ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+ }
+
+ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+ privateKey.Precomputed.CRTValues = crtValues
+ }
+
+ key := &rsaPrivateKey{
+ rsaPublicKey: *publicKey,
+ PrivateKey: privateKey,
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+ k = new(rsaPrivateKey)
+ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+
+ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(2048)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(3072)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(4096)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+ }
+
+ return k, nil
+}
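+
+// exampleRSASign is a minimal sketch (hypothetical helper, not part of the
+// upstream libtrust API): generate a 2048-bit key and sign a stream with a
+// SHA-256 digest, yielding the signature and the JWS algorithm name (here
+// "RS256").
+func exampleRSASign(data io.Reader) ([]byte, string, error) {
+ key, err := GenerateRSA2048PrivateKey()
+ if err != nil {
+ return nil, "", err
+ }
+ return key.Sign(data, crypto.SHA256)
+}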
diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go
new file mode 100644
index 000000000..a5a101d3f
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/elliptic"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path,
+// generating and saving a new EC P-256 key there if the file does not exist
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ return nil, err
+ }
+
+ trustKey, err := LoadKeyFile(trustKeyPath)
+ if err == ErrKeyFileDoesNotExist {
+ trustKey, err = GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("error generating key: %s", err)
+ }
+
+ if err := SaveKey(trustKeyPath, trustKey); err != nil {
+ return nil, fmt.Errorf("error saving key file: %s", err)
+ }
+
+ dir, file := filepath.Split(trustKeyPath)
+ if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+ return nil, fmt.Errorf("error saving public key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("error loading key file: %s", err)
+ }
+ return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
+// based authentication from the specified dockerUrl, the rootConfigPath and
+// the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+ knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+ u, err := url.Parse(dockerUrl)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse machine url")
+ }
+
+ if u.Scheme == "unix" {
+ return nil, nil
+ }
+
+ addr := u.Host
+ proto := "tcp"
+
+ trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load trust key: %s", err)
+ }
+
+ knownHosts, err := LoadKeySetFile(knownHostsPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+ }
+
+ allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+ if err != nil {
+ return nil, fmt.Errorf("error filtering hosts: %s", err)
+ }
+
+ certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+ if err != nil {
+ return nil, fmt.Errorf("Could not create CA pool: %s", err)
+ }
+
+ tlsConfig.ServerName = serverName
+ tlsConfig.RootCAs = certPool
+
+ x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ tlsConfig.InsecureSkipVerify = true
+
+ testConn, err := tls.Dial(proto, addr, tlsConfig)
+ if err != nil {
+ return nil, fmt.Errorf("tls Handshake error: %s", err)
+ }
+
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ CurrentTime: time.Now(),
+ DNSName: tlsConfig.ServerName,
+ Intermediates: x509.NewCertPool(),
+ }
+
+ certs := testConn.ConnectionState().PeerCertificates
+ for i, cert := range certs {
+ if i == 0 {
+ continue
+ }
+ opts.Intermediates.AddCert(cert)
+ }
+
+ if _, err := certs[0].Verify(opts); err != nil {
+ if _, ok := err.(x509.UnknownAuthorityError); ok {
+ if trustUnknownHosts {
+ pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+ }
+
+ pubKey.AddExtendedField("hosts", []string{addr})
+
+ if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+ return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+ }
+ } else {
+ return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+ }
+ }
+ }
+
+ testConn.Close()
+ tlsConfig.InsecureSkipVerify = false
+
+ return tlsConfig, nil
+}
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+ s = strings.Replace(s, "\n", "", -1)
+ s = strings.Replace(s, " ", "", -1)
+ switch len(s) % 4 {
+ case 0:
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ default:
+ return nil, errors.New("illegal base64url string")
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
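+
+// As a worked example of the two helpers above: joseBase64UrlEncode([]byte("hello"))
+// yields "aGVsbG8" with the trailing "=" padding stripped, and
+// joseBase64UrlDecode("aGVsbG8") re-adds the padding before decoding and
+// restores the original bytes.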
+
+func keyIDEncode(b []byte) string {
+ s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
+ var buf bytes.Buffer
+ var i int
+ for i = 0; i < len(s)/4-1; i++ {
+ start := i * 4
+ end := start + 4
+ buf.WriteString(s[start:end] + ":")
+ }
+ buf.WriteString(s[i*4:])
+ return buf.String()
+}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+ // Generate and return a 'libtrust' fingerprint of the public key.
+ // For an RSA key this should be:
+ // SHA256(DER encoded ASN1)
+ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+ if err != nil {
+ return ""
+ }
+ hasher := crypto.SHA256.New()
+ hasher.Write(derBytes)
+ return keyIDEncode(hasher.Sum(nil)[:30])
+}
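+
+// Since 30 bytes (240 bits) of the SHA-256 digest are kept, the base32
+// encoding is exactly 48 characters with no padding, which keyIDEncode
+// splits into 12 colon-separated groups of 4.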
+
+func stringFromMap(m map[string]interface{}, key string) (string, error) {
+ val, ok := m[key]
+ if !ok {
+ return "", fmt.Errorf("%q value not specified", key)
+ }
+
+ str, ok := val.(string)
+ if !ok {
+ return "", fmt.Errorf("%q value must be a string", key)
+ }
+ delete(m, key)
+
+ return str, nil
+}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+ cBytes, err := joseBase64UrlDecode(cB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ cByteLength := len(cBytes)
+ if cByteLength != curveByteLen {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+ }
+ return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ dBytes, err := joseBase64UrlDecode(dB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := curve.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ dByteLength := len(dBytes)
+
+ if dByteLength != octetLength {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+ }
+
+ return new(big.Int).SetBytes(dBytes), nil
+}
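+
+// For example, for the P-256 curve n is a 256-bit order, so
+// (bitlength(n-1)+7)>>3 = (256+7)>>3 = 32 and the "d" parameter must decode
+// to exactly 32 octets.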
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+ nBytes, err := joseBase64UrlDecode(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(nBytes), nil
+}
+
+func serializeRSAPublicExponentParam(e int) []byte {
+ // We MUST use the minimum number of octets to represent E.
+ // E is supposed to be 65537 for performance and security reasons
+ // and is what golang's rsa package generates, but it might be
+ // different if imported from some other generator.
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(e))
+ var i int
+ for i = 0; i < 8; i++ {
+ if buf[i] != 0 {
+ break
+ }
+ }
+ return buf[i:]
+}
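+
+// Worked example for the exponent helpers: e = 65537 encodes big-endian as
+// 0x00 0x01 0x00 0x01, the loop skips the single leading zero octet, and the
+// serialized JWK "e" parameter is the three bytes 0x01 0x00 0x01 ("AQAB" in
+// base64url).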
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+ eBytes, err := joseBase64UrlDecode(eB64Url)
+ if err != nil {
+ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ // Only the minimum number of bytes were used to represent E, but
+ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+ // to add zero padding if necessary.
+ byteLen := len(eBytes)
+ buf := make([]byte, 4-byteLen, 4)
+ eBytes = append(buf, eBytes...)
+
+ return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+ b64Url, err := stringFromMap(m, key)
+ if err != nil {
+ return nil, err
+ }
+
+ paramBytes, err := joseBase64UrlDecode(b64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+ pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+ for k, v := range headers {
+ switch val := v.(type) {
+ case string:
+ pemBlock.Headers[k] = val
+ case []string:
+ if k == "hosts" {
+ pemBlock.Headers[k] = strings.Join(val, ",")
+ } else {
+ // TODO: return an error for this non-encodable type
+ }
+ default:
+ // TODO: return an error for this non-encodable type
+ }
+ }
+
+ return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+ }
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ addPEMHeadersToKey(pemBlock, pubKey)
+
+ return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+ for key, value := range pemBlock.Headers {
+ var safeVal interface{}
+ if key == "hosts" {
+ safeVal = strings.Split(value, ",")
+ } else {
+ safeVal = value
+ }
+ pubKey.AddExtendedField(key, safeVal)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
new file mode 100644
index 000000000..9e4bd4dbe
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
new file mode 100644
index 000000000..e26cd4fc8
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
new file mode 100644
index 000000000..11cccd0a0
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/README.md
@@ -0,0 +1,77 @@
+# SpdyStream
+
+A multiplexed stream library using SPDY
+
+## Usage
+
+Client example (connecting to mirroring server without auth)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/docker/spdystream"
+ "net"
+ "net/http"
+)
+
+func main() {
+ conn, err := net.Dial("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.NoOpStreamHandler)
+ stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+ if err != nil {
+ panic(err)
+ }
+
+ stream.Wait()
+
+ fmt.Fprint(stream, "Writing to stream")
+
+ buf := make([]byte, 25)
+ stream.Read(buf)
+ fmt.Println(string(buf))
+
+ stream.Close()
+}
+```
+
+Server example (mirroring server without auth)
+
+```go
+package main
+
+import (
+ "github.com/docker/spdystream"
+ "net"
+)
+
+func main() {
+ listener, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.MirrorStreamHandler)
+ }
+}
+```
+
+## Copyright and license
+
+Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 000000000..df27d1dd1
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,959 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrInvalidStreamId = errors.New("Invalid stream id")
+	ErrTimeout = errors.New("Timeout occurred")
+ ErrReset = errors.New("Stream reset")
+ ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
+const (
+ FRAME_WORKERS = 5
+ QUEUE_SIZE = 50
+)
+
+type StreamHandler func(stream *Stream)
+
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
+type idleAwareFramer struct {
+ f *spdy.Framer
+ conn *Connection
+ writeLock sync.Mutex
+ resetChan chan struct{}
+ setTimeoutLock sync.Mutex
+ setTimeoutChan chan time.Duration
+ timeout time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+ iaf := &idleAwareFramer{
+ f: framer,
+ resetChan: make(chan struct{}, 2),
+ // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+ // the same time the connection is being closed
+ setTimeoutChan: make(chan time.Duration, 1),
+ }
+ return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+ var (
+ timer *time.Timer
+ expired <-chan time.Time
+ resetChan = i.resetChan
+ setTimeoutChan = i.setTimeoutChan
+ )
+Loop:
+ for {
+ select {
+ case timeout := <-i.setTimeoutChan:
+ i.timeout = timeout
+ if timeout == 0 {
+ if timer != nil {
+ timer.Stop()
+ }
+ } else {
+ if timer == nil {
+ timer = time.NewTimer(timeout)
+ expired = timer.C
+ } else {
+ timer.Reset(timeout)
+ }
+ }
+ case <-resetChan:
+ if timer != nil && i.timeout > 0 {
+ timer.Reset(i.timeout)
+ }
+ case <-expired:
+ i.conn.streamCond.L.Lock()
+ streams := i.conn.streams
+ i.conn.streams = make(map[spdy.StreamId]*Stream)
+ i.conn.streamCond.Broadcast()
+ i.conn.streamCond.L.Unlock()
+ go func() {
+ for _, stream := range streams {
+ stream.resetStream()
+ }
+ i.conn.Close()
+ }()
+ case <-i.conn.closeChan:
+ if timer != nil {
+ timer.Stop()
+ }
+
+ // Start a goroutine to drain resetChan. This is needed because we've seen
+ // some unit tests with large numbers of goroutines get into a situation
+ // where resetChan fills up, at least 1 call to Write() is still trying to
+ // send to resetChan, the connection gets closed, and this case statement
+ // attempts to grab the write lock that Write() already has, causing a
+ // deadlock.
+ //
+ // See https://github.com/docker/spdystream/issues/49 for more details.
+ go func() {
+			for range resetChan {
+ }
+ }()
+
+ go func() {
+				for range setTimeoutChan {
+ }
+ }()
+
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+
+ i.setTimeoutLock.Lock()
+ close(i.setTimeoutChan)
+ i.setTimeoutChan = nil
+ i.setTimeoutLock.Unlock()
+
+ break Loop
+ }
+ }
+
+ // Drain resetChan
+	for range resetChan {
+ }
+}
+
+func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return io.EOF
+ }
+ err := i.f.WriteFrame(frame)
+ if err != nil {
+ return err
+ }
+
+ i.resetChan <- struct{}{}
+
+ return nil
+}
+
+func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
+ frame, err := i.f.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+
+	// resetChan will not be closed out from under us here: it is only
+	// closed after the connection has closed its closeChan, and that
+	// closure only occurs after all reads have finished.
+ // TODO (dmcgowan): refactor relationship into connection
+ i.resetChan <- struct{}{}
+
+ return frame, nil
+}
+
+func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
+ i.setTimeoutLock.Lock()
+ defer i.setTimeoutLock.Unlock()
+
+ if i.setTimeoutChan == nil {
+ return
+ }
+
+ i.setTimeoutChan <- timeout
+}
+
+type Connection struct {
+ conn net.Conn
+ framer *idleAwareFramer
+
+ closeChan chan bool
+ goneAway bool
+ lastStreamChan chan<- *Stream
+ goAwayTimeout time.Duration
+ closeTimeout time.Duration
+
+ streamLock *sync.RWMutex
+ streamCond *sync.Cond
+ streams map[spdy.StreamId]*Stream
+
+ nextIdLock sync.Mutex
+ receiveIdLock sync.Mutex
+ nextStreamId spdy.StreamId
+ receivedStreamId spdy.StreamId
+
+ pingIdLock sync.Mutex
+ pingId uint32
+ pingChans map[uint32]chan error
+
+ shutdownLock sync.Mutex
+ shutdownChan chan error
+ hasShutdown bool
+
+ // for testing https://github.com/docker/spdystream/pull/56
+ dataFrameHandler func(*spdy.DataFrame) error
+}
+
+// NewConnection creates a new spdy connection from an existing
+// network connection.
+func NewConnection(conn net.Conn, server bool) (*Connection, error) {
+ framer, framerErr := spdy.NewFramer(conn, conn)
+ if framerErr != nil {
+ return nil, framerErr
+ }
+ idleAwareFramer := newIdleAwareFramer(framer)
+ var sid spdy.StreamId
+ var rid spdy.StreamId
+ var pid uint32
+ if server {
+ sid = 2
+ rid = 1
+ pid = 2
+ } else {
+ sid = 1
+ rid = 2
+ pid = 1
+ }
+
+ streamLock := new(sync.RWMutex)
+ streamCond := sync.NewCond(streamLock)
+
+ session := &Connection{
+ conn: conn,
+ framer: idleAwareFramer,
+
+ closeChan: make(chan bool),
+ goAwayTimeout: time.Duration(0),
+ closeTimeout: time.Duration(0),
+
+ streamLock: streamLock,
+ streamCond: streamCond,
+ streams: make(map[spdy.StreamId]*Stream),
+ nextStreamId: sid,
+ receivedStreamId: rid,
+
+ pingId: pid,
+ pingChans: make(map[uint32]chan error),
+
+ shutdownChan: make(chan error),
+ }
+ session.dataFrameHandler = session.handleDataFrame
+ idleAwareFramer.conn = session
+ go idleAwareFramer.monitor()
+
+ return session, nil
+}
+
+// Ping sends a ping frame across the connection and
+// returns the response time
+func (s *Connection) Ping() (time.Duration, error) {
+	s.pingIdLock.Lock()
+	pid := s.pingId
+ if s.pingId > 0x7ffffffe {
+ s.pingId = s.pingId - 0x7ffffffe
+ } else {
+ s.pingId = s.pingId + 2
+ }
+ s.pingIdLock.Unlock()
+ pingChan := make(chan error)
+ s.pingChans[pid] = pingChan
+ defer delete(s.pingChans, pid)
+
+ frame := &spdy.PingFrame{Id: pid}
+ startTime := time.Now()
+ writeErr := s.framer.WriteFrame(frame)
+ if writeErr != nil {
+ return time.Duration(0), writeErr
+ }
+ select {
+ case <-s.closeChan:
+ return time.Duration(0), errors.New("connection closed")
+ case err, ok := <-pingChan:
+ if ok && err != nil {
+ return time.Duration(0), err
+ }
+ break
+ }
+	return time.Since(startTime), nil
+}
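A hedged usage sketch for Ping; the address and panic-based error handling mirror the README examples and are illustrative only.

```go
package main

import (
	"fmt"
	"net"

	"github.com/docker/spdystream"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	go spdyConn.Serve(spdystream.NoOpStreamHandler)

	// Ping sends a PING frame and blocks until the echo returns.
	rtt, err := spdyConn.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip:", rtt)
}
```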
+
+// Serve handles frames sent from the server, including reply frames
+// which are needed to fully initiate connections. Both clients and servers
+// should call Serve in a separate goroutine before creating streams.
+func (s *Connection) Serve(newHandler StreamHandler) {
+ // use a WaitGroup to wait for all frames to be drained after receiving
+ // go-away.
+ var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+ // by the same worker, ensuring order is maintained
+ frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+ for i := 0; i < FRAME_WORKERS; i++ {
+ frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+ // Ensure frame queue is drained when connection is closed
+ go func(frameQueue *PriorityFrameQueue) {
+ <-s.closeChan
+ frameQueue.Drain()
+ }(frameQueues[i])
+
+ wg.Add(1)
+ go func(frameQueue *PriorityFrameQueue) {
+ // let the WaitGroup know this worker is done
+ defer wg.Done()
+
+ s.frameHandler(frameQueue, newHandler)
+ }(frameQueues[i])
+ }
+
+ var (
+ partitionRoundRobin int
+ goAwayFrame *spdy.GoAwayFrame
+ )
+Loop:
+ for {
+ readFrame, err := s.framer.ReadFrame()
+ if err != nil {
+ if err != io.EOF {
+ debugMessage("frame read error: %s", err)
+ } else {
+ debugMessage("(%p) EOF received", s)
+ }
+ break
+ }
+ var priority uint8
+ var partition int
+ switch frame := readFrame.(type) {
+ case *spdy.SynStreamFrame:
+ if s.checkStreamFrame(frame) {
+ priority = frame.Priority
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+ s.addStreamFrame(frame)
+ } else {
+ debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+ continue
+ }
+ case *spdy.SynReplyFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.DataFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.RstStreamFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.HeadersFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.PingFrame:
+ priority = 0
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ case *spdy.GoAwayFrame:
+ // hold on to the go away frame and exit the loop
+ goAwayFrame = frame
+ break Loop
+ default:
+ priority = 7
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ }
+ frameQueues[partition].Push(readFrame, priority)
+ }
+ close(s.closeChan)
+
+ // wait for all frame handler workers to indicate they've drained their queues
+ // before handling the go away frame
+ wg.Wait()
+
+ if goAwayFrame != nil {
+ s.handleGoAwayFrame(goAwayFrame)
+ }
+
+ // now it's safe to close remote channels and empty s.streams
+ s.streamCond.L.Lock()
+ // notify streams that they're now closed, which will
+ // unblock any stream Read() calls
+ for _, stream := range s.streams {
+ stream.closeRemoteChannels()
+ }
+ s.streams = make(map[spdy.StreamId]*Stream)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+ for {
+ popFrame := frameQueue.Pop()
+ if popFrame == nil {
+ return
+ }
+
+ var frameErr error
+ switch frame := popFrame.(type) {
+ case *spdy.SynStreamFrame:
+ frameErr = s.handleStreamFrame(frame, newHandler)
+ case *spdy.SynReplyFrame:
+ frameErr = s.handleReplyFrame(frame)
+ case *spdy.DataFrame:
+ frameErr = s.dataFrameHandler(frame)
+ case *spdy.RstStreamFrame:
+ frameErr = s.handleResetFrame(frame)
+ case *spdy.HeadersFrame:
+ frameErr = s.handleHeaderFrame(frame)
+ case *spdy.PingFrame:
+ frameErr = s.handlePingFrame(frame)
+ case *spdy.GoAwayFrame:
+ frameErr = s.handleGoAwayFrame(frame)
+ default:
+ frameErr = fmt.Errorf("unhandled frame type: %T", frame)
+ }
+
+ if frameErr != nil {
+ debugMessage("frame handling error: %s", frameErr)
+ }
+ }
+}
+
+func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
+ stream, streamOk := s.getStream(streamId)
+ if !streamOk {
+ return 7
+ }
+ return stream.priority
+}
+
+func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
+ var parent *Stream
+ if frame.AssociatedToStreamId != spdy.StreamId(0) {
+ parent, _ = s.getStream(frame.AssociatedToStreamId)
+ }
+
+ stream := &Stream{
+ streamId: frame.StreamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: frame.Headers,
+ finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
+ replyCond: sync.NewCond(new(sync.Mutex)),
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ priority: frame.Priority,
+ }
+ if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
+ stream.closeRemoteChannels()
+ }
+
+ s.addStream(stream)
+}
+
+// checkStreamFrame checks whether a stream frame is allowed.
+// If the stream is invalid, a reset frame with protocol error
+// is sent to the peer and false is returned.
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
+ s.receiveIdLock.Lock()
+ defer s.receiveIdLock.Unlock()
+ if s.goneAway {
+ return false
+ }
+ validationErr := s.validateStreamId(frame.StreamId)
+ if validationErr != nil {
+ go func() {
+ resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
+ if resetErr != nil {
+ debugMessage("reset error: %s", resetErr)
+ }
+ }()
+ return false
+ }
+ return true
+}
+
+func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
+ stream, ok := s.getStream(frame.StreamId)
+ if !ok {
+ return fmt.Errorf("Missing stream: %d", frame.StreamId)
+ }
+
+ newHandler(stream)
+
+ return nil
+}
+
+func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
+ debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("Reply frame gone away for %d", frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if stream.replied {
+ // Stream has already received reply
+ return nil
+ }
+ stream.replied = true
+
+ // TODO Check for error
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ close(stream.startChan)
+
+ return nil
+}
+
+func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already been removed
+ return nil
+ }
+ s.removeStream(stream)
+ stream.closeRemoteChannels()
+
+ if !stream.replied {
+ stream.replied = true
+ stream.startChan <- ErrReset
+ close(stream.startChan)
+ }
+
+ stream.finishLock.Lock()
+ stream.finished = true
+ stream.finishLock.Unlock()
+
+ return nil
+}
+
+func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ // TODO limit headers while not blocking (use buffered chan or goroutine?)
+ select {
+ case <-stream.closeChan:
+ return nil
+ case stream.headerChan <- frame.Headers:
+ }
+
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ return nil
+}
+
+func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
+ debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
+ if len(frame.Data) > 0 {
+ stream.dataLock.RLock()
+ select {
+ case <-stream.closeChan:
+ debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
+ case stream.dataChan <- frame.Data:
+ debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
+ }
+ stream.dataLock.RUnlock()
+ }
+ if (frame.Flags & spdy.DataFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+ return nil
+}
+
+func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
+ if s.pingId&0x01 != frame.Id&0x01 {
+ return s.framer.WriteFrame(frame)
+ }
+ pingChan, pingOk := s.pingChans[frame.Id]
+ if pingOk {
+ close(pingChan)
+ }
+ return nil
+}
+
+func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
+ debugMessage("(%p) Go away received", s)
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ if s.lastStreamChan != nil {
+ stream, _ := s.getStream(frame.LastGoodStreamId)
+ go func() {
+ s.lastStreamChan <- stream
+ }()
+ }
+
+ // Do not block frame handler waiting for closure
+ go s.shutdown(s.goAwayTimeout)
+
+ return nil
+}
+
+func (s *Connection) remoteStreamFinish(stream *Stream) {
+ stream.closeRemoteChannels()
+
+ stream.finishLock.Lock()
+ if stream.finished {
+ // Stream is fully closed, cleanup
+ s.removeStream(stream)
+ }
+ stream.finishLock.Unlock()
+}
+
+// CreateStream creates a new spdy stream using the parameters for
+// creating the stream frame. The stream frame will be sent upon
+// calling this function, however this function does not wait for
+// the reply frame. If waiting for the reply is desired, use
+// the stream Wait or WaitTimeout function on the stream returned
+// by this function.
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
+ // MUST synchronize stream creation (all the way to writing the frame)
+ // as stream IDs **MUST** increase monotonically.
+ s.nextIdLock.Lock()
+ defer s.nextIdLock.Unlock()
+
+ streamId := s.getNextStreamId()
+ if streamId == 0 {
+ return nil, fmt.Errorf("Unable to get new stream id")
+ }
+
+ stream := &Stream{
+ streamId: streamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: headers,
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+
+ debugMessage("(%p) (%p) Create stream", s, stream)
+
+ s.addStream(stream)
+
+ return stream, s.sendStream(stream, fin)
+}
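A hedged fragment showing the create-then-wait pattern the doc comment describes; it assumes spdyConn was set up as in the README client example, WaitTimeout is the stream method referenced above, and the five-second deadline is illustrative.

```go
stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
if err != nil {
	panic(err)
}
// Block until the peer's reply frame arrives, or give up after 5s
// (ErrTimeout), or fail if the stream was rejected (ErrReset).
if err := stream.WaitTimeout(5 * time.Second); err != nil {
	panic(err)
}
```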
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+ // TODO Ensure this isn't called multiple times
+ s.shutdownLock.Lock()
+ if s.hasShutdown {
+ s.shutdownLock.Unlock()
+ return
+ }
+ s.hasShutdown = true
+ s.shutdownLock.Unlock()
+
+ var timeout <-chan time.Time
+ if closeTimeout > time.Duration(0) {
+ timeout = time.After(closeTimeout)
+ }
+ streamsClosed := make(chan bool)
+
+ go func() {
+ s.streamCond.L.Lock()
+ for len(s.streams) > 0 {
+ debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+ s.streamCond.Wait()
+ }
+ s.streamCond.L.Unlock()
+ close(streamsClosed)
+ }()
+
+ var err error
+ select {
+ case <-streamsClosed:
+ // No active streams, close should be safe
+ err = s.conn.Close()
+ case <-timeout:
+ // Force ungraceful close
+ err = s.conn.Close()
+ // Wait for cleanup to clear active streams
+ <-streamsClosed
+ }
+
+ if err != nil {
+ duration := 10 * time.Minute
+ time.AfterFunc(duration, func() {
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ debugMessage("Unhandled close error after %s: %s", duration, err)
+ }
+ default:
+ }
+ })
+ s.shutdownChan <- err
+ }
+ close(s.shutdownChan)
+
+ return
+}
+
+// Close closes the spdy connection by sending a GOAWAY frame and initiating shutdown
+func (s *Connection) Close() error {
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ var lastStreamId spdy.StreamId
+ if s.receivedStreamId > 2 {
+ lastStreamId = s.receivedStreamId - 2
+ }
+
+ goAwayFrame := &spdy.GoAwayFrame{
+ LastGoodStreamId: lastStreamId,
+ Status: spdy.GoAwayOK,
+ }
+
+ err := s.framer.WriteFrame(goAwayFrame)
+ if err != nil {
+ return err
+ }
+
+ go s.shutdown(s.closeTimeout)
+
+ return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note the underlying network Connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+ closeErr := s.Close()
+ if closeErr != nil {
+ return closeErr
+ }
+ shutdownErr, ok := <-s.shutdownChan
+ if ok {
+ return shutdownErr
+ }
+ return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or a GOAWAY frame
+// has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+ var timeout <-chan time.Time
+ if waitTimeout > time.Duration(0) {
+ timeout = time.After(waitTimeout)
+ }
+
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ return err
+ }
+ case <-timeout:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// NotifyClose registers a channel to be notified when the remote
+// peer indicates connection closure. The last stream to be
+// received by the remote will be sent on the channel. The notify
+// timeout will determine the duration between go away received
+// and the connection being closed.
+func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
+ s.goAwayTimeout = timeout
+ s.lastStreamChan = c
+}
+
+// SetCloseTimeout sets the amount of time close will wait for
+// streams to finish before terminating the underlying network
+// connection. Setting the timeout to 0 will cause close to
+// wait forever, which is the default.
+func (s *Connection) SetCloseTimeout(timeout time.Duration) {
+ s.closeTimeout = timeout
+}
+
+// SetIdleTimeout sets the amount of time the connection may sit idle before
+// it is forcefully terminated.
+func (s *Connection) SetIdleTimeout(timeout time.Duration) {
+ s.framer.setIdleTimeout(timeout)
+}
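A hedged fragment wiring the lifecycle knobs above together; it assumes spdyConn is an established *spdystream.Connection, and the channel capacity and timeout values are illustrative.

```go
lastStream := make(chan *spdystream.Stream, 1)
spdyConn.NotifyClose(lastStream, 30*time.Second) // drain window after a GOAWAY
spdyConn.SetIdleTimeout(5 * time.Minute)         // reset on every frame read or written
spdyConn.SetCloseTimeout(10 * time.Second)       // Close waits this long for open streams

// Close sends GOAWAY and begins shutdown; CloseWait also blocks until
// the underlying network connection is torn down.
if err := spdyConn.CloseWait(); err != nil {
	panic(err)
}
```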
+
+func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ headerFrame := &spdy.HeadersFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(headerFrame)
+}
+
+func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ replyFrame := &spdy.SynReplyFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(replyFrame)
+}
+
+func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: streamId,
+ Status: status,
+ }
+
+ return s.framer.WriteFrame(resetFrame)
+}
+
+func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
+ return s.sendResetFrame(status, stream.streamId)
+}
+
+func (s *Connection) sendStream(stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ stream.finished = true
+ }
+
+ var parentId spdy.StreamId
+ if stream.parent != nil {
+ parentId = stream.parent.streamId
+ }
+
+ streamFrame := &spdy.SynStreamFrame{
+ StreamId: spdy.StreamId(stream.streamId),
+ AssociatedToStreamId: spdy.StreamId(parentId),
+ Headers: stream.headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(streamFrame)
+}
+
+// getNextStreamId returns the next sequential id.
+// Every call should produce a unique value, or 0 once the id space is exhausted.
+func (s *Connection) getNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ if sid > 0x7fffffff {
+ return 0
+ }
+ s.nextStreamId = s.nextStreamId + 2
+ return sid
+}
+
+// PeekNextStreamId returns the next sequential id and keeps the next id untouched
+func (s *Connection) PeekNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ return sid
+}
+
+func (s *Connection) validateStreamId(rid spdy.StreamId) error {
+ if rid > 0x7fffffff || rid < s.receivedStreamId {
+ return ErrInvalidStreamId
+ }
+ s.receivedStreamId = rid + 2
+ return nil
+}
+
+func (s *Connection) addStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ s.streams[stream.streamId] = stream
+ debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) removeStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ delete(s.streams, stream.streamId)
+ debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+ s.streamLock.RLock()
+ stream, ok = s.streams[streamId]
+ s.streamLock.RUnlock()
+ return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+ var stream *Stream
+ var ok bool
+ s.streamCond.L.Lock()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+ for !ok && streamId >= uint32(s.receivedStreamId) {
+ s.streamCond.Wait()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ }
+ s.streamCond.L.Unlock()
+ return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+ return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 000000000..b59fa5fdc
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+ "io"
+ "net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+ replyErr := stream.SendReply(http.Header{}, false)
+ if replyErr != nil {
+ return
+ }
+
+ go func() {
+ io.Copy(stream, stream)
+ stream.Close()
+ }()
+ go func() {
+ for {
+ header, receiveErr := stream.ReceiveHeader()
+ if receiveErr != nil {
+ return
+ }
+ sendErr := stream.SendHeader(header, false)
+ if sendErr != nil {
+ return
+ }
+ }
+ }()
+}
+
+// NoOpStreamHandler does nothing when a stream connects; it is most
+// likely used with RejectAuthHandler, which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+ stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 000000000..fc8582b5c
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+ "container/heap"
+ "sync"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+ frame spdy.Frame
+ priority uint8
+ insertId uint64
+}
+
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+ return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+ if fq[i].priority == fq[j].priority {
+ return fq[i].insertId < fq[j].insertId
+ }
+ return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+ fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+ *fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+ old := *fq
+ n := len(old)
+ *fq = old[0 : n-1]
+ return old[n-1]
+}
+
+type PriorityFrameQueue struct {
+ queue *frameQueue
+ c *sync.Cond
+ size int
+ nextInsertId uint64
+ drain bool
+}
+
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+ queue := make(frameQueue, 0, size)
+ heap.Init(&queue)
+
+ return &PriorityFrameQueue{
+ queue: &queue,
+ size: size,
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() >= q.size {
+ q.c.Wait()
+ }
+ pFrame := &prioritizedFrame{
+ frame: frame,
+ priority: priority,
+ insertId: q.nextInsertId,
+ }
+ q.nextInsertId = q.nextInsertId + 1
+ heap.Push(q.queue, pFrame)
+ q.c.Signal()
+}
+
+func (q *PriorityFrameQueue) Pop() spdy.Frame {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() == 0 {
+ if q.drain {
+ return nil
+ }
+ q.c.Wait()
+ }
+ frame := heap.Pop(q.queue).(*prioritizedFrame).frame
+ q.c.Signal()
+ return frame
+}
+
+func (q *PriorityFrameQueue) Drain() {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ q.drain = true
+ q.c.Broadcast()
+}
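+
+// examplePriorityFrameQueue is an illustrative sketch, not part of the
+// upstream file: it demonstrates the ordering rules above, assuming only
+// the exported API of this file and the spdy.PingFrame type.
+func examplePriorityFrameQueue() {
+ q := NewPriorityFrameQueue(64)
+ q.Push(&spdy.PingFrame{Id: 1}, 7) // lowest priority
+ q.Push(&spdy.PingFrame{Id: 3}, 0) // highest priority, pops first
+ first := q.Pop() // the Id 3 ping frame
+ _ = first
+ q.Drain() // after Drain, Pop on an empty queue returns nil
+}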
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
new file mode 100644
index 000000000..5a5ff0e14
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+var headerDictionary = []byte{
+ 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
+ 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
+ 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
+ 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
+ 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
+ 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+ 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
+ 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
+ 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
+ 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
+ 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
+ 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
+ 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
+ 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
+ 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
+ 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
+ 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+ 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
+ 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
+ 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
+ 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
+ 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
+ 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
+ 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
+ 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
+ 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
+ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
+ 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
+ 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
+ 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
+ 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
+ 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
+ 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
+ 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
+ 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
+ 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
+ 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
+ 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
+ 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
+ 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
+ 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
+ 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
+ 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
+ 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
+ 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
+ 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
+ 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
+ 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
+ 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
+ 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
+ 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
+ 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
+ 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
+ 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
+ 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
+ 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
+ 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
+ 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
+ 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
+ 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
+ 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
+ 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
+ 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
+ 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
+ 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+ 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
+ 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
+ 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
+ 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
+ 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
+ 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
+ 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
+ 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
+ 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
+ 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
+ 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
+ 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
+ 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
+ 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
+ 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
+ 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
+ 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
+ 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
+ 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
+ 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
+ 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
+ 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
+ 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
+ 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
+ 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
+ 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
+ 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
+ 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
new file mode 100644
index 000000000..9359a9501
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/read.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "compress/zlib"
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynStreamFrame(h, frame)
+}
+
+func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readSynReplyFrame(h, frame)
+}
+
+func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ var numSettings uint32
+ if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
+ return err
+ }
+ frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
+ for i := uint32(0); i < numSettings; i++ {
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
+ return err
+ }
+ frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
+ frame.FlagIdValues[i].Id &= 0xffffff
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
+ return err
+ }
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, StreamId(frame.Id)}
+ }
+ return nil
+}
+
+func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
+ return f.readHeadersFrame(h, frame)
+}
+
+func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if frame.CFHeader.Flags != 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if frame.CFHeader.length != 8 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
+ ctor, ok := cframeCtor[frameType]
+ if !ok {
+ return nil, &Error{Err: InvalidControlFrame}
+ }
+ return ctor(), nil
+}
+
+var cframeCtor = map[ControlFrameType]func() controlFrame{
+ TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
+ TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
+ TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
+ TypeSettings: func() controlFrame { return new(SettingsFrame) },
+ TypePing: func() controlFrame { return new(PingFrame) },
+ TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
+ TypeHeaders: func() controlFrame { return new(HeadersFrame) },
+ TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
+}
+
+func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
+ if f.headerDecompressor != nil {
+ f.headerReader.N = payloadSize
+ return nil
+ }
+ f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
+ decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
+ if err != nil {
+ return err
+ }
+ f.headerDecompressor = decompressor
+ return nil
+}
+
+// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
+func (f *Framer) ReadFrame() (Frame, error) {
+ var firstWord uint32
+ if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
+ return nil, err
+ }
+ if firstWord&0x80000000 != 0 {
+ frameType := ControlFrameType(firstWord & 0xffff)
+ version := uint16(firstWord >> 16 & 0x7fff)
+ return f.parseControlFrame(version, frameType)
+ }
+ return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
+}
+
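+// exampleReadLoop is an illustrative sketch, not part of the upstream
+// file: callers typically dispatch on the concrete frame type returned
+// by ReadFrame, using only types defined in this package.
+func exampleReadLoop(f *Framer) error {
+ for {
+ frame, err := f.ReadFrame()
+ if err != nil {
+ return err // io.EOF once the peer closes cleanly
+ }
+ switch t := frame.(type) {
+ case *SynStreamFrame:
+ _ = t.StreamId // the peer announced a new stream
+ case *DataFrame:
+ _ = t.Data // payload bytes for t.StreamId
+ case *RstStreamFrame:
+ _ = t.Status // the peer reset t.StreamId
+ }
+ }
+}
+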
+func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ flags := ControlFlags((length & 0xff000000) >> 24)
+ length &= 0xffffff
+ header := ControlFrameHeader{version, frameType, flags, length}
+ cframe, err := newControlFrame(frameType)
+ if err != nil {
+ return nil, err
+ }
+ if err = cframe.read(header, f); err != nil {
+ return nil, err
+ }
+ return cframe, nil
+}
+
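+// parseHeaderValueBlock decodes a SPDY/3 name/value block: a uint32 pair
+// count, then for each pair a length-prefixed (uint32) lowercase name and
+// a length-prefixed (uint32) value, with multiple values joined by
+// headerValueSeparator.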
+func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
+ var numHeaders uint32
+ if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
+ return nil, err
+ }
+ var e error
+ h := make(http.Header, int(numHeaders))
+ for i := 0; i < int(numHeaders); i++ {
+ var length uint32
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ nameBytes := make([]byte, length)
+ if _, err := io.ReadFull(r, nameBytes); err != nil {
+ return nil, err
+ }
+ name := string(nameBytes)
+ if name != strings.ToLower(name) {
+ e = &Error{UnlowercasedHeaderName, streamId}
+ name = strings.ToLower(name)
+ }
+ if h[name] != nil {
+ e = &Error{DuplicateHeaders, streamId}
+ }
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ value := make([]byte, length)
+ if _, err := io.ReadFull(r, value); err != nil {
+ return nil, err
+ }
+ valueList := strings.Split(string(value), headerValueSeparator)
+ for _, v := range valueList {
+ h.Add(name, v)
+ }
+ }
+ if e != nil {
+ return h, e
+ }
+ return h, nil
+}
+
+func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
+ return err
+ }
+ frame.Priority >>= 5
+ if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 10))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidReqHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ for h := range frame.Headers {
+ if invalidRespHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
+ frame.CFHeader = h
+ var err error
+ if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+ return err
+ }
+ reader := f.r
+ if !f.headerCompressionDisabled {
+ err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+ if err != nil {
+ return err
+ }
+ reader = f.headerDecompressor
+ }
+ frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+ if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+ err = &Error{WrongCompressedPayloadSize, 0}
+ }
+ if err != nil {
+ return err
+ }
+ var invalidHeaders map[string]bool
+ if frame.StreamId%2 == 0 {
+ invalidHeaders = invalidReqHeaders
+ } else {
+ invalidHeaders = invalidRespHeaders
+ }
+ for h := range frame.Headers {
+ if invalidHeaders[h] {
+ return &Error{InvalidHeaderPresent, frame.StreamId}
+ }
+ }
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ return nil
+}
+
+func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
+ var length uint32
+ if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ var frame DataFrame
+ frame.StreamId = streamId
+ frame.Flags = DataFlags(length >> 24)
+ length &= 0xffffff
+ frame.Data = make([]byte, length)
+ if _, err := io.ReadFull(f.r, frame.Data); err != nil {
+ return nil, err
+ }
+ if frame.StreamId == 0 {
+ return nil, &Error{ZeroStreamId, 0}
+ }
+ return &frame, nil
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
new file mode 100644
index 000000000..7b6ee9c6f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/types.go
@@ -0,0 +1,275 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io"
+ "net/http"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 3
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+const (
+ TypeSynStream ControlFrameType = 0x0001
+ TypeSynReply = 0x0002
+ TypeRstStream = 0x0003
+ TypeSettings = 0x0004
+ TypePing = 0x0006
+ TypeGoAway = 0x0007
+ TypeHeaders = 0x0008
+ TypeWindowUpdate = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+ ControlFlagFin ControlFlags = 0x01
+ ControlFlagUnidirectional = 0x02
+ ControlFlagSettingsClearSettings = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+ DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+ write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
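+// On the wire the header packs a control bit, a 15-bit version and a
+// 16-bit type into the first word, followed by 8 bits of flags packed
+// with a 24-bit length (see parseControlFrame in read.go and
+// writeControlFrameHeader in write.go).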
+type ControlFrameHeader struct {
+ // Note, high bit is the "Control" bit.
+ version uint16 // spdy version number
+ frameType ControlFrameType
+ Flags ControlFlags
+ length uint32 // length of data field
+}
+
+type controlFrame interface {
+ Frame
+ read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ AssociatedToStreamId StreamId // id of the stream this stream is associated with
+ Priority uint8 // priority of this frame (3-bit)
+ Slot uint8 // index in the server's credential vector of the client certificate
+ Headers http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+ ProtocolError RstStreamStatus = iota + 1
+ InvalidStream
+ RefusedStream
+ UnsupportedVersion
+ Cancel
+ InternalError
+ FlowControlError
+ StreamInUse
+ StreamAlreadyClosed
+ InvalidCredentials
+ FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Status RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+ FlagSettingsPersistValue SettingsFlag = 0x1
+ FlagSettingsPersisted = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+ SettingsUploadBandwidth SettingsId = iota + 1
+ SettingsDownloadBandwidth
+ SettingsRoundTripTime
+ SettingsMaxConcurrentStreams
+ SettingsCurrentCwnd
+ SettingsDownloadRetransRate
+ SettingsInitialWindowSize
+ SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+ Flag SettingsFlag
+ Id SettingsId
+ Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+ CFHeader ControlFrameHeader
+ FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+ CFHeader ControlFrameHeader
+ Id uint32 // unique id for this ping; ids from the server are even, from the client odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+ GoAwayOK GoAwayStatus = iota
+ GoAwayProtocolError
+ GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+ CFHeader ControlFrameHeader
+ LastGoodStreamId StreamId // last stream id which was accepted by sender
+ Status GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ Headers http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+ CFHeader ControlFrameHeader
+ StreamId StreamId
+ DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+ // Note, high bit is the "Control" bit. Should be 0 for data frames.
+ StreamId StreamId
+ Flags DataFlags
+ Data []byte // payload data of this frame
+}
+
+// ErrorCode describes a SPDY-specific error.
+type ErrorCode string
+
+const (
+ UnlowercasedHeaderName ErrorCode = "header was not lowercased"
+ DuplicateHeaders = "multiple headers with same name"
+ WrongCompressedPayloadSize = "compressed payload size was incorrect"
+ UnknownFrameType = "unknown frame type"
+ InvalidControlFrame = "invalid control frame"
+ InvalidDataFrame = "invalid data frame"
+ InvalidHeaderPresent = "frame contained invalid header"
+ ZeroStreamId = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+ Err ErrorCode
+ StreamId StreamId
+}
+
+func (e *Error) Error() string {
+ return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+ "Connection": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+ "Connection": true,
+ "Keep-Alive": true,
+ "Proxy-Connection": true,
+ "Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
+type Framer struct {
+ headerCompressionDisabled bool
+ w io.Writer
+ headerBuf *bytes.Buffer
+ headerCompressor *zlib.Writer
+ r io.Reader
+ headerReader io.LimitedReader
+ headerDecompressor io.ReadCloser
+}
+
+// NewFramer allocates a new Framer for a given SPDY connection, represented by
+// an io.Writer and an io.Reader. Note that the Framer reads and writes individual
+// fields from/to the Reader and Writer, so the caller should pass in an
+// appropriately buffered implementation to optimize performance.
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
+ compressBuf := new(bytes.Buffer)
+ compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
+ if err != nil {
+ return nil, err
+ }
+ framer := &Framer{
+ w: w,
+ headerBuf: compressBuf,
+ headerCompressor: compressor,
+ r: r,
+ }
+ return framer, nil
+}
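+
+// exampleFramerRoundTrip is an illustrative sketch, not part of the
+// upstream file: it writes a PING frame through a Framer backed by an
+// in-memory buffer and reads it back, using only this package's types.
+func exampleFramerRoundTrip() (Frame, error) {
+ var buf bytes.Buffer
+ framer, err := NewFramer(&buf, &buf)
+ if err != nil {
+ return nil, err
+ }
+ // The Id must be non-zero; a zero id is rejected with ZeroStreamId.
+ if err := framer.WriteFrame(&PingFrame{Id: 1}); err != nil {
+ return nil, err
+ }
+ return framer.ReadFrame() // yields the *PingFrame written above
+}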
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
new file mode 100644
index 000000000..b212f66a2
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/write.go
@@ -0,0 +1,318 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
+func (frame *SynStreamFrame) write(f *Framer) error {
+ return f.writeSynStreamFrame(frame)
+}
+
+func (frame *SynReplyFrame) write(f *Framer) error {
+ return f.writeSynReplyFrame(frame)
+}
+
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeRstStream
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *SettingsFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSettings
+ frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
+ return
+ }
+ for _, flagIdValue := range frame.FlagIdValues {
+ flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
+ if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (frame *PingFrame) write(f *Framer) (err error) {
+ if frame.Id == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypePing
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 4
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
+ return
+ }
+ return
+}
+
+func (frame *GoAwayFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeGoAway
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *HeadersFrame) write(f *Framer) error {
+ return f.writeHeadersFrame(frame)
+}
+
+func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeWindowUpdate
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
+ return
+ }
+ return nil
+}
+
+func (frame *DataFrame) write(f *Framer) error {
+ return f.writeDataFrame(frame)
+}
+
+// WriteFrame writes a frame.
+func (f *Framer) WriteFrame(frame Frame) error {
+ return frame.write(f)
+}
+
+func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
+ if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
+ return err
+ }
+ if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
+ return err
+ }
+ flagsAndLength := uint32(h.Flags)<<24 | h.length
+ if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
+ return err
+ }
+ return nil
+}
+
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
+ n = 0
+ if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
+ return
+ }
+ n += 4 // binary.Write of a uint32 emits 4 bytes, not 2
+ for name, values := range h {
+ if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
+ return
+ }
+ n += 4
+ name = strings.ToLower(name)
+ if _, err = io.WriteString(w, name); err != nil {
+ return
+ }
+ n += len(name)
+ v := strings.Join(values, headerValueSeparator)
+ if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
+ return
+ }
+ n += 4
+ if _, err = io.WriteString(w, v); err != nil {
+ return
+ }
+ n += len(v)
+ }
+ return
+}
+
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynStream
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
+ return err
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return err
+ }
+ f.headerBuf.Reset()
+ return nil
+}
+
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynReply
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeHeaders
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
+ return &Error{InvalidDataFrame, frame.StreamId}
+ }
+
+ // Serialize frame to Writer.
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+ if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+ return
+ }
+ if _, err = f.w.Write(frame.Data); err != nil {
+ return
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 000000000..f9e9ee267
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+ streamId spdy.StreamId
+ parent *Stream
+ conn *Connection
+ startChan chan error
+
+ dataLock sync.RWMutex
+ dataChan chan []byte
+ unread []byte
+
+ priority uint8
+ headers http.Header
+ headerChan chan http.Header
+ finishLock sync.Mutex
+ finished bool
+ replyCond *sync.Cond
+ replied bool
+ closeLock sync.Mutex
+ closeChan chan bool
+}
+
+// WriteData writes data to the stream, sending one data frame per call.
+func (s *Stream) WriteData(data []byte, fin bool) error {
+ s.waitWriteReply()
+ var flags spdy.DataFlags
+
+ if fin {
+ flags = spdy.DataFlagFin
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return ErrWriteClosedStream
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+ }
+
+ dataFrame := &spdy.DataFrame{
+ StreamId: s.streamId,
+ Flags: flags,
+ Data: data,
+ }
+
+ debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+ return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to the stream, calling WriteData once per call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+ err = s.WriteData(data, false)
+ if err == nil {
+ n = len(data)
+ }
+ return
+}
+
+// Read reads bytes from the stream. A single read will never return
+// more than what was sent in a single data frame, but multiple calls
+// to Read may return data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ if s.unread == nil {
+ select {
+ case <-s.closeChan:
+ return 0, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return 0, io.EOF
+ }
+ s.unread = read
+ }
+ }
+ n = copy(p, s.unread)
+ if n < len(s.unread) {
+ s.unread = s.unread[n:]
+ } else {
+ s.unread = nil
+ }
+ return
+}
+
+// ReadData reads an entire data frame and returns its byte array.
+// If there is unread data left over from a previous Read call,
+// this function returns ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+ debugMessage("(%p) Reading data from %d", s, s.streamId)
+ if s.unread != nil {
+ return nil, ErrUnreadPartialData
+ }
+ select {
+ case <-s.closeChan:
+ return nil, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return nil, io.EOF
+ }
+ return read, nil
+ }
+}
+
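+// exampleDrain is an illustrative sketch, not part of the upstream file:
+// it drains a stream with Read until the remote side closes, at which
+// point Read returns io.EOF.
+func exampleDrain(s *Stream) ([]byte, error) {
+ var out []byte
+ buf := make([]byte, 4096)
+ for {
+ n, err := s.Read(buf)
+ out = append(out, buf[:n]...)
+ if err == io.EOF {
+ return out, nil // remote side finished the stream
+ }
+ if err != nil {
+ return out, err
+ }
+ }
+}
+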
+func (s *Stream) waitWriteReply() {
+ if s.replyCond != nil {
+ s.replyCond.L.Lock()
+ for !s.replied {
+ s.replyCond.Wait()
+ }
+ s.replyCond.L.Unlock()
+ }
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+ return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for the
+// timeout to elapse. When the timeout is reached, ErrTimeout is returned.
+func (s *Stream) WaitTimeout(timeout time.Duration) error {
+ var timeoutChan <-chan time.Time
+ if timeout > time.Duration(0) {
+ timeoutChan = time.After(timeout)
+ }
+
+ select {
+ case err := <-s.startChan:
+ if err != nil {
+ return err
+ }
+ break
+ case <-timeoutChan:
+ return ErrTimeout
+ }
+ return nil
+}
+
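+// exampleOpenStream is an illustrative sketch, not part of the upstream
+// file: it opens a top-level stream via Connection.CreateStream (defined
+// in connection.go) and blocks in Wait until the remote reply arrives.
+func exampleOpenStream(conn *Connection) (*Stream, error) {
+ stream, err := conn.CreateStream(http.Header{}, nil, false)
+ if err != nil {
+ return nil, err
+ }
+ // Wait blocks until a SYN_REPLY is received or an error is delivered.
+ if err := stream.Wait(); err != nil {
+ return nil, err
+ }
+ return stream, nil
+}
+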
+// Close closes the stream by sending an empty data frame with the
+// finish flag set, indicating this side is finished with the stream.
+func (s *Stream) Close() error {
+ select {
+ case <-s.closeChan:
+ // Stream is now fully closed
+ s.conn.removeStream(s)
+ default:
+ break
+ }
+ return s.WriteData([]byte{}, true)
+}
+
+// Reset sends a reset frame, putting the stream into the fully closed state.
+func (s *Stream) Reset() error {
+ s.conn.removeStream(s)
+ return s.resetStream()
+}
+
+func (s *Stream) resetStream() error {
+ // Always call closeRemoteChannels, even if s.finished is already true.
+ // This makes it so that stream.Close() followed by stream.Reset() allows
+ // stream.Read() to unblock.
+ s.closeRemoteChannels()
+
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return nil
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: s.streamId,
+ Status: spdy.Cancel,
+ }
+ return s.conn.framer.WriteFrame(resetFrame)
+}
+
+// CreateSubStream creates a stream using the current as the parent
+func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
+ return s.conn.CreateStream(headers, s, fin)
+}
+
+// SetPriority sets the stream priority; it does not affect the
+// remote priority of this stream after Open has been called.
+// Valid values are 0 through 7, with 0 the highest priority
+// and 7 the lowest.
+func (s *Stream) SetPriority(priority uint8) {
+ s.priority = priority
+}
+
+// SendHeader sends a header frame across the stream
+func (s *Stream) SendHeader(headers http.Header, fin bool) error {
+ return s.conn.sendHeaders(headers, s, fin)
+}
+
+// SendReply sends a reply on a stream; it is only valid to call once,
+// when handling a new stream.
+func (s *Stream) SendReply(headers http.Header, fin bool) error {
+ if s.replyCond == nil {
+ return errors.New("cannot reply on initiated stream")
+ }
+ s.replyCond.L.Lock()
+ defer s.replyCond.L.Unlock()
+ if s.replied {
+ return nil
+ }
+
+ err := s.conn.sendReply(headers, s, fin)
+ if err != nil {
+ return err
+ }
+
+ s.replied = true
+ s.replyCond.Broadcast()
+ return nil
+}
+
+// Refuse sends a reset frame with status RefusedStream; it is only
+// valid to call once, when handling a new stream. This may be used
+// to indicate that a stream is not allowed when HTTP status codes
+// are not being used.
+func (s *Stream) Refuse() error {
+ if s.replied {
+ return nil
+ }
+ s.replied = true
+ return s.conn.sendReset(spdy.RefusedStream, s)
+}
+
+// Cancel sends a reset frame with status Cancel. This
+// can be used at any time by the creator of the Stream to
+// indicate the stream is no longer needed.
+func (s *Stream) Cancel() error {
+ return s.conn.sendReset(spdy.Cancel, s)
+}
+
+// ReceiveHeader receives a header sent from the other side
+// of the stream. This function will block until a header
+// is received or the stream is closed.
+func (s *Stream) ReceiveHeader() (http.Header, error) {
+ select {
+ case <-s.closeChan:
+ break
+ case header, ok := <-s.headerChan:
+ if !ok {
+ return nil, fmt.Errorf("header chan closed")
+ }
+ return header, nil
+ }
+ return nil, fmt.Errorf("stream closed")
+}
+
+// Parent returns the parent stream
+func (s *Stream) Parent() *Stream {
+ return s.parent
+}
+
+// Headers returns the headers used to create the stream
+func (s *Stream) Headers() http.Header {
+ return s.headers
+}
+
+// String returns a string representation of the stream, using the
+// streamId to uniquely identify it.
+func (s *Stream) String() string {
+ return fmt.Sprintf("stream:%d", s.streamId)
+}
+
+// Identifier returns a 32-bit identifier for the stream
+func (s *Stream) Identifier() uint32 {
+ return uint32(s.streamId)
+}
+
+// IsFinished returns whether the stream has finished
+// sending data
+func (s *Stream) IsFinished() bool {
+ return s.finished
+}
+
+// Implement net.Conn interface
+
+func (s *Stream) LocalAddr() net.Addr {
+ return s.conn.conn.LocalAddr()
+}
+
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.conn.conn.RemoteAddr()
+}
+
+// TODO set per stream values instead of connection-wide
+
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.conn.conn.SetDeadline(t)
+}
+
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.conn.conn.SetReadDeadline(t)
+}
+
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.conn.conn.SetWriteDeadline(t)
+}
+
+func (s *Stream) closeRemoteChannels() {
+ s.closeLock.Lock()
+ defer s.closeLock.Unlock()
+ select {
+ case <-s.closeChan:
+ default:
+ close(s.closeChan)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
new file mode 100644
index 000000000..1b2c199a4
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/utils.go
@@ -0,0 +1,16 @@
+package spdystream
+
+import (
+ "log"
+ "os"
+)
+
+var (
+ DEBUG = os.Getenv("DEBUG")
+)
+
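+// debugMessage logs through the standard library logger when the DEBUG
+// environment variable is set to any non-empty value (for example,
+// DEBUG=true).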
+func debugMessage(fmt string, args ...interface{}) {
+ if DEBUG != "" {
+ log.Printf(fmt, args...)
+ }
+}